MAhmedCh commited on
Commit
fe39cc9
·
1 Parent(s): 9bad15a

training and testing code for AbdCTBench

Browse files
code/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2020 apyrros
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
code/config/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Configuration module for multi-task comorbidity detection
code/config/biomarker_config.py ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Flexible Biomarker Configuration System
3
+ Supports dynamic task configuration without hardcoded assumptions
4
+ """
5
+
6
+ import yaml
7
+ import json
8
+ from dataclasses import dataclass
9
+ from typing import List, Dict, Any, Optional, Tuple
10
+ import pandas as pd
11
+ import numpy as np
12
+
13
+
14
@dataclass
class BinaryBiomarker:
    """Configuration for one binary classification target.

    Maps a dataset column onto a 0/1 label: cells equal to
    ``positive_class`` (case-insensitive, see prepare_targets_tensor)
    become 1, everything else becomes 0.
    """
    name: str  # column name in the dataset CSV
    description: str  # human-readable label (informational only)
    positive_class: str  # CSV value that maps to label 1
    negative_class: str = "ABSENT"  # CSV value that maps to label 0
    class_weight: Optional[float] = None  # optional loss weight for the positive class
22
+
23
+
24
@dataclass
class MulticlassBiomarker:
    """Configuration for one multiclass classification target.

    The order of ``classes`` is significant: it fixes the one-hot
    encoding layout used inside the target tensor.
    """
    name: str  # column name in the dataset CSV
    description: str  # human-readable label (informational only)
    classes: List[str]  # ordered class labels (determines one-hot order)
    class_weights: Optional[Dict[str, float]] = None  # optional per-class loss weights
31
+
32
+
33
@dataclass
class ContinuousBiomarker:
    """Configuration for one regression target.

    Values are mapped onto a training scale by :meth:`normalize` and back
    by :meth:`denormalize`.

    NOTE: "z_score" currently falls back to min-max scaling because the
    dataset mean/std are not available at this level; it is kept as a
    distinct option for forward compatibility. This matches the original
    behavior, now stated explicitly instead of duplicated across branches.
    """
    name: str  # column name in the dataset CSV
    description: str  # human-readable label (informational only)
    min_value: float  # minimum expected raw value
    max_value: float  # maximum expected raw value
    normalization: str = "min_max"  # "min_max", "z_score", or "none"

    def __post_init__(self):
        """Validate range and normalization method.

        Raises:
            ValueError: if max_value <= min_value or the normalization
                method is not one of the supported names.
        """
        if self.max_value <= self.min_value:
            raise ValueError(
                f"Invalid range for {self.name}: max_value ({self.max_value}) "
                f"must be greater than min_value ({self.min_value})"
            )
        if self.normalization not in {"min_max", "z_score", "none"}:
            raise ValueError(
                f"Unsupported normalization '{self.normalization}' for {self.name}. "
                "Expected one of: min_max, z_score, none."
            )

    @property
    def _span(self) -> float:
        """Width of the configured value range (validated positive)."""
        return self.max_value - self.min_value

    def normalize(self, value: float) -> float:
        """Map a raw value onto the training scale.

        Returns the value unchanged for "none"; otherwise applies min-max
        scaling to [0, 1] (also used as the z_score fallback, see class
        docstring).
        """
        if self.normalization == "none":
            return value
        return (value - self.min_value) / self._span

    def denormalize(self, normalized_value: float) -> float:
        """Inverse of :meth:`normalize` — map back to the original scale."""
        if self.normalization == "none":
            return normalized_value
        return normalized_value * self._span + self.min_value
84
+
85
+
86
@dataclass
class TensorLayout:
    """Location of one biomarker's outputs inside the flat target tensor.

    The slice ``[start_idx:end_idx]`` of the output vector belongs to this
    biomarker, so ``size == end_idx - start_idx``.
    """
    biomarker_name: str
    start_idx: int  # inclusive start index into the output tensor
    end_idx: int  # exclusive end index
    size: int  # number of output slots (1 for binary/continuous)
    task_type: str  # "binary", "multiclass", or "continuous"
94
+
95
+
96
class FlexibleBiomarkerConfig:
    """Flexible biomarker configuration that adapts to any task structure.

    Aggregates binary, multiclass, and continuous task definitions plus
    preprocessing / training / validation settings, and converts between
    dataframe rows and the flat target tensor used for training.

    Fix over the original: save_to_file now persists ``class_weight`` /
    ``class_weights``, which load_from_file already reads — previously a
    save -> load round trip silently dropped them.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize an empty configuration, optionally loading *config_path*."""
        self.experiment_name: str = ""
        self.description: str = ""
        self.binary_biomarkers: List[BinaryBiomarker] = []
        self.multiclass_biomarkers: List[MulticlassBiomarker] = []
        self.continuous_biomarkers: List[ContinuousBiomarker] = []
        self.preprocessing: Dict[str, Any] = {}
        self.training: Dict[str, Any] = {}
        self.validation: Dict[str, Any] = {}

        if config_path:
            self.load_from_file(config_path)

    def load_from_file(self, config_path: str):
        """Load configuration from a YAML (.yaml/.yml) or JSON (.json) file.

        Raises:
            ValueError: if the file extension is not recognized.
        """
        if config_path.endswith('.yaml') or config_path.endswith('.yml'):
            with open(config_path, 'r') as f:
                config_data = yaml.safe_load(f)
        elif config_path.endswith('.json'):
            with open(config_path, 'r') as f:
                config_data = json.load(f)
        else:
            raise ValueError(f"Unsupported config file format: {config_path}")

        self._parse_config(config_data)

    def _parse_config(self, config_data: Dict[str, Any]):
        """Populate attributes from a parsed configuration dictionary.

        Missing sections default to empty lists/dicts; per-biomarker
        optional fields fall back to the dataclass defaults.
        """
        self.experiment_name = config_data.get('experiment_name', '')
        self.description = config_data.get('description', '')

        self.binary_biomarkers = [
            BinaryBiomarker(
                name=b['name'],
                description=b['description'],
                positive_class=b['positive_class'],
                negative_class=b.get('negative_class', 'ABSENT'),
                class_weight=b.get('class_weight')
            )
            for b in config_data.get('binary_biomarkers', [])
        ]

        self.multiclass_biomarkers = [
            MulticlassBiomarker(
                name=m['name'],
                description=m['description'],
                classes=m['classes'],
                class_weights=m.get('class_weights')
            )
            for m in config_data.get('multiclass_biomarkers', [])
        ]

        self.continuous_biomarkers = [
            ContinuousBiomarker(
                name=c['name'],
                description=c['description'],
                min_value=c['min_value'],
                max_value=c['max_value'],
                normalization=c.get('normalization', 'min_max')
            )
            for c in config_data.get('continuous_biomarkers', [])
        ]

        self.preprocessing = config_data.get('preprocessing', {})
        self.training = config_data.get('training', {})
        self.validation = config_data.get('validation', {})

    @property
    def num_binary_tasks(self) -> int:
        """Number of binary classification tasks."""
        return len(self.binary_biomarkers)

    @property
    def num_multiclass_tasks(self) -> int:
        """Number of multiclass classification tasks."""
        return len(self.multiclass_biomarkers)

    @property
    def num_continuous_tasks(self) -> int:
        """Number of regression tasks."""
        return len(self.continuous_biomarkers)

    @property
    def total_multiclass_outputs(self) -> int:
        """Total outputs needed for all multiclass tasks (sum of class counts)."""
        return sum(len(m.classes) for m in self.multiclass_biomarkers)

    @property
    def total_output_size(self) -> int:
        """Total size of the flat output tensor across all task types."""
        return (self.num_binary_tasks +
                self.total_multiclass_outputs +
                self.num_continuous_tasks)

    def get_tensor_layout(self) -> Dict[str, TensorLayout]:
        """Return the position of every biomarker in the output tensor.

        Layout order: binary tasks first (one slot each), then multiclass
        (one slot per class), then continuous (one slot each).
        """
        layout: Dict[str, TensorLayout] = {}
        current_idx = 0

        for biomarker in self.binary_biomarkers:
            layout[biomarker.name] = TensorLayout(
                biomarker_name=biomarker.name,
                start_idx=current_idx,
                end_idx=current_idx + 1,
                size=1,
                task_type="binary"
            )
            current_idx += 1

        for biomarker in self.multiclass_biomarkers:
            num_classes = len(biomarker.classes)
            layout[biomarker.name] = TensorLayout(
                biomarker_name=biomarker.name,
                start_idx=current_idx,
                end_idx=current_idx + num_classes,
                size=num_classes,
                task_type="multiclass"
            )
            current_idx += num_classes

        for biomarker in self.continuous_biomarkers:
            layout[biomarker.name] = TensorLayout(
                biomarker_name=biomarker.name,
                start_idx=current_idx,
                end_idx=current_idx + 1,
                size=1,
                task_type="continuous"
            )
            current_idx += 1

        return layout

    def get_all_biomarker_names(self) -> List[str]:
        """Return all biomarker names in tensor-layout order."""
        names: List[str] = []
        names.extend(b.name for b in self.binary_biomarkers)
        names.extend(m.name for m in self.multiclass_biomarkers)
        names.extend(c.name for c in self.continuous_biomarkers)
        return names

    def validate_dataset_compatibility(self, df: pd.DataFrame) -> Tuple[bool, List[str]]:
        """Check that *df* has a column for every configured biomarker.

        Returns:
            Tuple of (is_compatible, missing_column_names).
        """
        required_columns = self.get_all_biomarker_names()
        missing_columns = [col for col in required_columns if col not in df.columns]
        return len(missing_columns) == 0, missing_columns

    def prepare_targets_tensor(self, df: pd.DataFrame, indices: Optional[List[int]] = None) -> np.ndarray:
        """Convert dataframe rows to target tensors for training.

        Args:
            df: DataFrame with one column per configured biomarker.
            indices: Optional positional row indices to process
                (all rows if None).

        Returns:
            numpy array of shape [num_samples, total_output_size].

        Missing-value handling: NaN binary cells map to the negative
        class (0.0), NaN continuous cells map to 0.0, and multiclass
        cells that match no configured class stay all-zero.
        """
        if indices is None:
            indices = list(range(len(df)))

        targets = np.zeros((len(indices), self.total_output_size))
        layout = self.get_tensor_layout()

        for i, row_idx in enumerate(indices):
            row = df.iloc[row_idx]

            # Binary: 1.0 iff the cell matches positive_class (case-insensitive).
            for biomarker in self.binary_biomarkers:
                tensor_info = layout[biomarker.name]
                value = row[biomarker.name]

                if pd.isna(value):
                    binary_value = 0.0  # default to negative class for missing values
                elif str(value).upper() == biomarker.positive_class.upper():
                    binary_value = 1.0
                else:
                    binary_value = 0.0

                targets[i, tensor_info.start_idx] = binary_value

            # Multiclass: one-hot against the configured class order.
            for biomarker in self.multiclass_biomarkers:
                tensor_info = layout[biomarker.name]
                value = str(row[biomarker.name]).upper()

                class_idx = -1
                for j, class_name in enumerate(biomarker.classes):
                    if value == class_name.upper():
                        class_idx = j
                        break

                if class_idx >= 0:
                    targets[i, tensor_info.start_idx + class_idx] = 1.0
                # No match found: leave the slot all-zero (unknown class).

            # Continuous: normalize, clipping bounded schemes to [0, 1].
            for biomarker in self.continuous_biomarkers:
                tensor_info = layout[biomarker.name]
                value = row[biomarker.name]

                if pd.isna(value):
                    normalized_value = 0.0  # default for missing values
                else:
                    normalized_value = biomarker.normalize(float(value))
                    if biomarker.normalization in {"min_max", "z_score"}:
                        # Keep normalized targets bounded for training stability.
                        normalized_value = float(np.clip(normalized_value, 0.0, 1.0))

                targets[i, tensor_info.start_idx] = normalized_value

        return targets

    def denormalize_continuous_predictions(self, predictions: np.ndarray) -> Dict[str, np.ndarray]:
        """Map normalized continuous predictions back to the original scale.

        Args:
            predictions: Array of shape [num_samples, total_output_size].

        Returns:
            Mapping of biomarker name -> float32 array of denormalized values.
        """
        layout = self.get_tensor_layout()
        denormalized: Dict[str, np.ndarray] = {}

        for biomarker in self.continuous_biomarkers:
            tensor_info = layout[biomarker.name]
            normalized_preds = predictions[:, tensor_info.start_idx]
            denormalized[biomarker.name] = np.array(
                [biomarker.denormalize(v) for v in normalized_preds], dtype=np.float32
            )

        return denormalized

    def save_to_file(self, file_path: str):
        """Save configuration to a YAML or JSON file.

        Persists class_weight / class_weights so the output round-trips
        through load_from_file without loss.

        Raises:
            ValueError: if the file extension is not .yaml/.yml/.json.
        """
        config_data = {
            'experiment_name': self.experiment_name,
            'description': self.description,
            'binary_biomarkers': [
                {
                    'name': b.name,
                    'description': b.description,
                    'positive_class': b.positive_class,
                    'negative_class': b.negative_class,
                    'class_weight': b.class_weight
                }
                for b in self.binary_biomarkers
            ],
            'multiclass_biomarkers': [
                {
                    'name': m.name,
                    'description': m.description,
                    'classes': m.classes,
                    'class_weights': m.class_weights
                }
                for m in self.multiclass_biomarkers
            ],
            'continuous_biomarkers': [
                {
                    'name': c.name,
                    'description': c.description,
                    'min_value': c.min_value,
                    'max_value': c.max_value,
                    'normalization': c.normalization
                }
                for c in self.continuous_biomarkers
            ],
            'preprocessing': self.preprocessing,
            'training': self.training,
            'validation': self.validation
        }

        if file_path.endswith('.yaml') or file_path.endswith('.yml'):
            with open(file_path, 'w') as f:
                yaml.safe_dump(config_data, f, default_flow_style=False, sort_keys=False, indent=2)
        elif file_path.endswith('.json'):
            with open(file_path, 'w') as f:
                json.dump(config_data, f, indent=2)
        else:
            raise ValueError(f"Unsupported file format: {file_path}")

    def print_summary(self):
        """Print a human-readable summary of tasks and the tensor layout."""
        print(f"Experiment: {self.experiment_name}")
        print(f"Description: {self.description}")
        print(f"\nTask Configuration:")
        print(f"  Binary tasks: {self.num_binary_tasks}")
        print(f"  Multiclass tasks: {self.num_multiclass_tasks}")
        print(f"  Continuous tasks: {self.num_continuous_tasks}")
        print(f"  Total output size: {self.total_output_size}")

        print(f"\nBinary Biomarkers:")
        for b in self.binary_biomarkers:
            print(f"  - {b.name}: {b.description}")

        if self.multiclass_biomarkers:
            print(f"\nMulticlass Biomarkers:")
            for m in self.multiclass_biomarkers:
                print(f"  - {m.name}: {m.description} ({len(m.classes)} classes)")

        print(f"\nContinuous Biomarkers:")
        for c in self.continuous_biomarkers:
            print(f"  - {c.name}: {c.description} (range: {c.min_value}-{c.max_value})")

        print(f"\nTensor Layout:")
        layout = self.get_tensor_layout()
        for name, info in layout.items():
            print(f"  {name}: indices {info.start_idx}-{info.end_idx-1} (size: {info.size}, type: {info.task_type})")
413
+
code/config/biomarker_config_multitask_example.json ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "experiment_name": "comorbidities_detection_multitask",
3
+ "description": "Multi-task prediction of comorbidities and demographics from AbdCTBench",
4
+
5
+ "binary_biomarkers": [
6
+ {"name": "MORTALITY", "description": "Death in the followup period", "positive_class": "PRESENT"},
7
+ {"name": "HCC12", "description": "HCC code 12 — Breast, Prostate, and other cancers", "positive_class": "PRESENT"},
8
+ {"name": "HCC18", "description": "HCC code 18 — Diabetes with Chronic Complications", "positive_class": "PRESENT"},
9
+ {"name": "HCC96", "description": "HCC code 96 — Cardiac arrhythmias", "positive_class": "PRESENT"},
10
+ {"name": "HCC108", "description": "HCC code 108 — Vascular disease", "positive_class": "PRESENT"},
11
+ {"name": "HCC111", "description": "HCC code 111 — Chronic obstructive pulmonary disease", "positive_class": "PRESENT"},
12
+ {"name": "T2D", "description": "Type 2 Diabetes", "positive_class": "PRESENT"},
13
+ {"name": "MI", "description": "Myocardial Infarction", "positive_class": "PRESENT"},
14
+ {"name": "CALCIUMSCORING_ABDOMINALAGATSTON_BINARY", "description": "High abdominal aortic calcium score (Agatston > 1000)", "positive_class": "PRESENT"}
15
+ ],
16
+
17
+ "multiclass_biomarkers": [],
18
+
19
+ "continuous_biomarkers": [
20
+ {
21
+ "name": "AGE",
22
+ "description": "Patient age in years (min-max normalized to [0, 1] during training)",
23
+ "min_value": 18,
24
+ "max_value": 89,
25
+ "normalization": "min_max"
26
+ }
27
+ ],
28
+
29
+ "preprocessing": {
30
+ "image_size": 256,
31
+ "normalize_images": true,
32
+ "convert_to_rgb": true
33
+ },
34
+
35
+ "training": {
36
+ "class_weighting": true,
37
+ "balanced_sampling": true
38
+ },
39
+
40
+ "validation": {
41
+ "threshold_optimization": true,
42
+ "optimization_metric": "f1_score",
43
+ "per_biomarker_thresholds": true,
44
+ "threshold_search_range": [0.1, 0.9],
45
+ "threshold_search_steps": 9,
46
+ "fallback_threshold": 0.5,
47
+ "metrics": [
48
+ "auroc",
49
+ "accuracy",
50
+ "sensitivity",
51
+ "specificity",
52
+ "f1_score",
53
+ "precision",
54
+ "recall"
55
+ ],
56
+ "regression_metrics": [
57
+ "mse",
58
+ "mae",
59
+ "r2_score"
60
+ ]
61
+ }
62
+ }
code/config/biomarker_config_multitask_example.yaml ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Multi-Task Biomarker Configuration — Published Experiments
2
+ #
3
+ # Pass this file to --biomarker_config in train.py or test.py.
4
+ #
5
+ # For a minimal single-task example with detailed field descriptions,
6
+ # see biomarker_config_single_task_example.yaml.
7
+
8
+ experiment_name: "comorbidities_detection_multitask"
9
+ description: "Multi-task prediction of comorbidities and demographics from AbdCTBench"
10
+
11
+ # -------------------------------------------------------------------------
12
+ # Binary classification tasks (9 targets)
13
+ # -------------------------------------------------------------------------
14
+ binary_biomarkers:
15
+ - name: "MORTALITY"
16
+ description: "Death in the followup period"
17
+ positive_class: "PRESENT"
18
+
19
+ - name: "HCC12"
20
+ description: "HCC code 12 — Breast, Prostate, and other cancers"
21
+ positive_class: "PRESENT"
22
+
23
+ - name: "HCC18"
24
+ description: "HCC code 18 — Diabetes with Chronic Complications"
25
+ positive_class: "PRESENT"
26
+
27
+ - name: "HCC96"
28
+ description: "HCC code 96 — Cardiac arrhythmias"
29
+ positive_class: "PRESENT"
30
+
31
+ - name: "HCC108"
32
+ description: "HCC code 108 — Vascular disease"
33
+ positive_class: "PRESENT"
34
+
35
+ - name: "HCC111"
36
+ description: "HCC code 111 — Chronic obstructive pulmonary disease"
37
+ positive_class: "PRESENT"
38
+
39
+ - name: "T2D"
40
+ description: "Type 2 Diabetes"
41
+ positive_class: "PRESENT"
42
+
43
+ - name: "MI"
44
+ description: "Myocardial Infarction"
45
+ positive_class: "PRESENT"
46
+
47
+ - name: "CALCIUMSCORING_ABDOMINALAGATSTON_BINARY"
48
+ description: "High abdominal aortic calcium score (Agatston > 1000)"
49
+ positive_class: "PRESENT"
50
+
51
+ # -------------------------------------------------------------------------
52
+ # Multiclass classification tasks
53
+ # -------------------------------------------------------------------------
54
+ multiclass_biomarkers: []
55
+
56
+ # -------------------------------------------------------------------------
57
+ # Continuous regression tasks (1 target)
58
+ # -------------------------------------------------------------------------
59
+ continuous_biomarkers:
60
+ - name: "AGE"
61
+ description: "Patient age in years (min-max normalized to [0, 1] during training)"
62
+ min_value: 18
63
+ max_value: 89
64
+ normalization: "min_max"
65
+
66
+ # -------------------------------------------------------------------------
67
+ # Preprocessing settings
68
+ # -------------------------------------------------------------------------
69
+ preprocessing:
70
+ image_size: 256
71
+ normalize_images: true
72
+ convert_to_rgb: true
73
+
74
+ # -------------------------------------------------------------------------
75
+ # Training settings
76
+ # -------------------------------------------------------------------------
77
+ training:
78
+ class_weighting: true
79
+ balanced_sampling: true
80
+
81
+ # -------------------------------------------------------------------------
82
+ # Validation / threshold settings
83
+ # -------------------------------------------------------------------------
84
+ validation:
85
+ threshold_optimization: true
86
+ optimization_metric: "f1_score"
87
+ per_biomarker_thresholds: true
88
+ threshold_search_range: [0.1, 0.9]
89
+ threshold_search_steps: 9
90
+ fallback_threshold: 0.5
91
+
92
+ metrics:
93
+ - "auroc"
94
+ - "accuracy"
95
+ - "sensitivity"
96
+ - "specificity"
97
+ - "f1_score"
98
+ - "precision"
99
+ - "recall"
100
+ regression_metrics:
101
+ - "mse"
102
+ - "mae"
103
+ - "r2_score"
code/config/biomarker_config_single_task_example.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "experiment_name": "mortality_single_task",
3
+ "description": "Single-task prediction of Mortality in AbdCTBench",
4
+
5
+ "binary_biomarkers": [
6
+ {
7
+ "name": "MORTALITY",
8
+ "description": "Death status",
9
+ "positive_class": "PRESENT",
10
+ "negative_class": "ABSENT"
11
+ }
12
+ ],
13
+
14
+ "multiclass_biomarkers": [],
15
+
16
+ "continuous_biomarkers": [],
17
+
18
+ "preprocessing": {
19
+ "image_size": 256,
20
+ "normalize_images": true,
21
+ "convert_to_rgb": true
22
+ },
23
+
24
+ "training": {
25
+ "class_weighting": true,
26
+ "balanced_sampling": true
27
+ },
28
+
29
+ "validation": {
30
+ "threshold_optimization": true,
31
+ "optimization_metric": "f1_score",
32
+ "per_biomarker_thresholds": true,
33
+ "threshold_search_range": [0.1, 0.9],
34
+ "threshold_search_steps": 9,
35
+ "fallback_threshold": 0.5,
36
+ "metrics": [
37
+ "auroc",
38
+ "accuracy",
39
+ "sensitivity",
40
+ "specificity",
41
+ "f1_score",
42
+ "precision",
43
+ "recall"
44
+ ],
45
+ "regression_metrics": [
46
+ "mse",
47
+ "mae",
48
+ "r2_score"
49
+ ]
50
+ }
51
+ }
code/config/biomarker_config_single_task_example.yaml ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Single-Task Biomarker Configuration Example
2
+ #
3
+ # This file shows how to configure training for a single prediction target.
4
+ # Pass this file to --biomarker_config in train.py or test.py.
5
+ #
6
+ # To define your own config, copy this file, edit the biomarker entries,
7
+ # and point --biomarker_config at your new file.
8
+ #
9
+ # For the full multi-task configuration used in the published experiments,
10
+ # see biomarker_config_multitask_example.yaml.
11
+
12
+ experiment_name: "mortality_single_task"
13
+ description: "Single-task prediction of Mortality in AbdCTBench"
14
+
15
+ # -------------------------------------------------------------------------
16
+ # Binary classification tasks (output: sigmoid probability, label: 0 or 1)
17
+ # -------------------------------------------------------------------------
18
+ # Each entry requires:
19
+ # name - column name in train.csv / val.csv / test.csv
20
+ # description - human-readable label (informational only)
21
+ # positive_class - the CSV value that maps to label=1
22
+ # negative_class - the CSV value that maps to label=0 (default: "ABSENT")
23
+ #
24
+ # Available binary targets in AbdCTBench:
25
+ # MORTALITY, HCC12, HCC18, HCC96, HCC108,
26
+ # CALCIUMSCORING_ABDOMINALAGATSTON_BINARY
27
+ binary_biomarkers:
28
+ - name: "MORTALITY"
29
+ description: "Death status"
30
+ positive_class: "PRESENT"
31
+ negative_class: "ABSENT"
32
+
33
+ # -------------------------------------------------------------------------
34
+ # Multiclass classification tasks (output: softmax probabilities)
35
+ # -------------------------------------------------------------------------
36
+ # Each entry requires:
37
+ # name - column name in the CSV
38
+ # classes - ordered list of class labels (determines one-hot encoding order)
39
+ #
40
+ # Leave empty if not needed.
41
+ multiclass_biomarkers: []
42
+
43
+ # -------------------------------------------------------------------------
44
+ # Continuous regression tasks (output: raw continuous value)
45
+ # -------------------------------------------------------------------------
46
+ # Each entry requires:
47
+ # name - column name in the CSV
48
+ # min_value - minimum expected value (used for min-max normalization)
49
+ # max_value - maximum expected value
50
+ # normalization - normalization method; currently only "min_max" is supported
51
+ #
52
+ # The model predicts a normalized value in [0, 1]; metrics are reported on
53
+ # the denormalized scale.
54
+ #
55
+ # Leave empty if not needed.
56
+ continuous_biomarkers: []
57
+
58
+ # -------------------------------------------------------------------------
59
+ # Preprocessing settings
60
+ # -------------------------------------------------------------------------
61
+ preprocessing:
62
+ image_size: 256 # images are resized to this square size
63
+ normalize_images: true
64
+ convert_to_rgb: true # grayscale CT slices are replicated to 3 channels
65
+
66
+ # -------------------------------------------------------------------------
67
+ # Training settings
68
+ # -------------------------------------------------------------------------
69
+ training:
70
+ class_weighting: true # use inverse-frequency weighting for binary tasks
71
+ balanced_sampling: true # balanced batch sampling
72
+
73
+ # -------------------------------------------------------------------------
74
+ # Validation / threshold settings
75
+ # -------------------------------------------------------------------------
76
+ validation:
77
+ threshold_optimization: true
78
+ optimization_metric: "f1_score" # metric used to pick the best decision threshold
79
+ per_biomarker_thresholds: true # optimize a separate threshold per biomarker
80
+ threshold_search_range: [0.1, 0.9]
81
+ threshold_search_steps: 9 # evaluates thresholds 0.1, 0.2, ..., 0.9
82
+ fallback_threshold: 0.5
83
+
84
+ metrics:
85
+ - "auroc"
86
+ - "accuracy"
87
+ - "sensitivity"
88
+ - "specificity"
89
+ - "f1_score"
90
+ - "precision"
91
+ - "recall"
92
+ regression_metrics:
93
+ - "mse"
94
+ - "mae"
95
+ - "r2_score"
code/config/experiment_config.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Experiment Configuration System
3
+ Handles experiment parameters for training and evaluation
4
+ """
5
+
6
+ import ast
7
+ from dataclasses import dataclass
8
+ from typing import List, Dict, Any
9
+ import os
10
+
11
+
12
# Per-model defaults for pretrained_weights and single_target_strategy.
# These are sourced from the experimentation plan used in the published
# results. Users can override both via the --pretrained_weights and
# --single_target_strategy CLI flags.

# Shared default used by the plain-CNN backbones and as the fallback
# for models not listed in MODEL_DEFAULTS.
_IMAGENET_DIRECT: Dict[str, str] = {
    "pretrained_weights": "ImageNet",
    "single_target_strategy": "Direct classification head",
}

MODEL_DEFAULTS: Dict[str, Dict[str, str]] = {
    "ResNet-18": dict(_IMAGENET_DIRECT),
    "ResNet-34": dict(_IMAGENET_DIRECT),
    "DenseNet-121": dict(_IMAGENET_DIRECT),
    "EfficientNet-B0": dict(_IMAGENET_DIRECT),
    "ViT-Small (DINOv2)": {
        "pretrained_weights": "DINOv2 (self-supervised)",
        "single_target_strategy": "CLS token classification",
    },
    "Swin Transformer-Base": {
        "pretrained_weights": "ImageNet-22K",
        "single_target_strategy": "CLS token classification",
    },
    "ResNet-50 (RadImageNet)": {
        "pretrained_weights": "RadImageNet",
        "single_target_strategy": "Direct classification head",
    },
}

# Default image-augmentation settings shared across experiments.
DEFAULT_AUGMENTATION_PARAMS: Dict[str, Any] = {
    "rotation": 15,
    "horizontal_flip": True,
    "random_crop": True,
    "color_jitter": True,
    "brightness": 0.2,
    "contrast": 0.2,
    "imagenet_norm": True,
}

# Backwards-compatible alias (an independent shallow copy, as before).
DEFAULT_AUGMENTATIONS = DEFAULT_AUGMENTATION_PARAMS.copy()


def get_model_defaults(model_name: str) -> Dict[str, str]:
    """Return the default pretrained_weights and single_target_strategy
    for *model_name*.

    Unknown models fall back to ImageNet weights with a direct
    classification head.
    """
    defaults = MODEL_DEFAULTS.get(model_name)
    if defaults is None:
        defaults = dict(_IMAGENET_DIRECT)
    return defaults
47
+
48
+
49
@dataclass
class ExperimentConfig:
    """Configuration for a single experiment.

    Bundles model, optimization, training, and bookkeeping settings.
    ``__post_init__`` normalizes ``learning_rate`` into a list of values,
    validates ``image_augmentations`` via ``normalize_augmentation_params``,
    and auto-generates ``experiment_name`` when it is left empty.
    """

    # Model configuration
    model: str                    # backbone identifier, e.g. "EfficientNet-B0"
    loss_function: str
    must_include: bool
    learning_rate: List[float]    # one value, or several for a sweep
    batch_size: int
    weight_decay: float
    optimizer: str                # see create_optimizer for supported names
    scheduler: str                # see create_scheduler for supported names

    # Training configuration
    image_augmentations: Dict[str, Any]  # normalized/validated in __post_init__
    dropout: float
    loss_specific_params: str
    multi_target_strategy: str
    single_target_strategy: str
    pretrained_weights: str
    fine_tuning_strategy: str     # also drives the experiment-name suffix

    # System configuration
    expected_gpu_memory: str
    architectural_family: str
    class_weighting: str
    sampling_strategy: str
    threshold_selection: str

    # Additional configuration
    experiment_name: str = ""     # auto-generated when empty
    output_dir: str = ""          # explicit output dir; otherwise derived

    # GradNorm configuration
    use_gradnorm: bool = False
    gradnorm_alpha: float = 0.16
    gradnorm_update_freq: int = 10

    def __post_init__(self):
        """Process configuration after initialization"""
        # learning_rate may arrive as a string (e.g. from a CSV/config file):
        # try to parse it as a Python literal, then as a single float, and
        # finally fall back to a safe default of 1e-4.
        if isinstance(self.learning_rate, str):
            try:
                self.learning_rate = ast.literal_eval(self.learning_rate)
            except (ValueError, SyntaxError):
                try:
                    self.learning_rate = [float(self.learning_rate)]
                except ValueError:
                    self.learning_rate = [1e-4]

        # Always store learning rates as a list, even for a single value.
        if not isinstance(self.learning_rate, list):
            self.learning_rate = [self.learning_rate]

        self.image_augmentations = normalize_augmentation_params(self.image_augmentations)

        if not self.experiment_name:
            self.experiment_name = self._generate_experiment_name()

    def _generate_experiment_name(self) -> str:
        """Generate a unique experiment name based on configuration"""
        import datetime

        # Strip characters that are unsafe in file/directory names.
        model_clean = self.model.replace('/', '_').replace(' ', '_').replace('(', '').replace(')', '')
        lr_str = f"lr{self.learning_rate[0]:.0e}" if len(self.learning_rate) == 1 else "lr_sweep"
        batch_str = f"bs{self.batch_size}"

        # Encode the fine-tuning strategy as a short suffix.
        ft_suffix = ""
        if "frozen" in self.fine_tuning_strategy.lower() or "probe" in self.fine_tuning_strategy.lower():
            ft_suffix = "_frozen"
        elif "partial" in self.fine_tuning_strategy.lower():
            ft_suffix = "_partial"

        # Timestamp guarantees uniqueness across repeated runs.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        return f"{model_clean}_{lr_str}_{batch_str}{ft_suffix}_{timestamp}"

    def get_output_directory(self, base_dir: str) -> str:
        """Get the output directory for this experiment.

        Returns ``output_dir`` when explicitly set, otherwise
        ``<base_dir>/<experiment_name>``.
        """
        if self.output_dir:
            return self.output_dir
        return os.path.join(base_dir, self.experiment_name)

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to dictionary.

        NOTE(review): ``output_dir`` and the GradNorm fields are not
        included here — confirm whether that omission is intentional for
        callers that serialize configs.
        """
        # Collapse a one-element learning-rate list back to a scalar.
        learning_rate_value = self.learning_rate[0] if len(self.learning_rate) == 1 else self.learning_rate

        return {
            'model': self.model,
            'loss_function': self.loss_function,
            'must_include': self.must_include,
            'learning_rate': learning_rate_value,
            'batch_size': self.batch_size,
            'weight_decay': self.weight_decay,
            'optimizer': self.optimizer,
            'scheduler': self.scheduler,
            'image_augmentations': self.image_augmentations,
            'dropout': self.dropout,
            'loss_specific_params': self.loss_specific_params,
            'multi_target_strategy': self.multi_target_strategy,
            'single_target_strategy': self.single_target_strategy,
            'pretrained_weights': self.pretrained_weights,
            'fine_tuning_strategy': self.fine_tuning_strategy,
            'expected_gpu_memory': self.expected_gpu_memory,
            'architectural_family': self.architectural_family,
            'class_weighting': self.class_weighting,
            'sampling_strategy': self.sampling_strategy,
            'threshold_selection': self.threshold_selection,
            'experiment_name': self.experiment_name,
        }
157
+
158
+
159
def normalize_augmentation_params(aug_input: Any) -> Dict[str, Any]:
    """Normalize augmentation params into a validated parameter dictionary.

    Accepts ``None`` (use defaults), a dict, or a dict-like string literal.
    Unknown input forms raise ``ValueError``; every known key is coerced to
    its expected type.
    """
    params = DEFAULT_AUGMENTATION_PARAMS.copy()

    if aug_input is None:
        return params

    # A string is accepted only when it parses as a Python literal dict.
    if isinstance(aug_input, str):
        try:
            aug_input = ast.literal_eval(aug_input)
        except (ValueError, SyntaxError) as exc:
            raise ValueError(
                "image_augmentations must be a dict (or a dict-like string), "
                "not a free-form text description."
            ) from exc

    if not isinstance(aug_input, dict):
        raise ValueError("image_augmentations must be a dictionary of augmentation params.")

    params.update(aug_input)

    # Enforce expected types on every known key.
    for key, caster in (
        ("rotation", int),
        ("horizontal_flip", bool),
        ("random_crop", bool),
        ("color_jitter", bool),
        ("brightness", float),
        ("contrast", float),
        ("imagenet_norm", bool),
    ):
        params[key] = caster(params[key])

    return params
191
+
192
+
193
def parse_augmentation_string(aug_input: Any) -> Dict[str, Any]:
    """Deprecated alias for ``normalize_augmentation_params``.

    Kept so that older imports and call sites keep working.
    """
    return normalize_augmentation_params(aug_input)
196
+
197
+
198
def create_optimizer(model_parameters, config: 'ExperimentConfig'):
    """Create the optimizer named by ``config.optimizer``.

    Uses the first learning rate in ``config.learning_rate`` and
    ``config.weight_decay``; SGD additionally gets momentum 0.9.
    Raises ``ValueError`` for unsupported optimizer names.
    """
    import torch.optim as optim

    # Hoist the shared hyperparameters once.
    lr = config.learning_rate[0]
    wd = config.weight_decay

    name = config.optimizer
    if name == 'AdamW':
        return optim.AdamW(model_parameters, lr=lr, weight_decay=wd)
    if name == 'Adam':
        return optim.Adam(model_parameters, lr=lr, weight_decay=wd)
    if name == 'SGD':
        return optim.SGD(model_parameters, lr=lr, weight_decay=wd, momentum=0.9)
    raise ValueError(f"Unsupported optimizer: {config.optimizer}")
223
+
224
+
225
def create_scheduler(optimizer, config: 'ExperimentConfig', total_epochs: int):
    """Create the LR scheduler named by ``config.scheduler``.

    ``total_epochs`` only affects CosineAnnealing (its T_max); the other
    schedulers use fixed hyperparameters. Raises ``ValueError`` for
    unsupported scheduler names.
    """
    import torch.optim.lr_scheduler as lr_scheduler

    # Table of factories; evaluation is deferred until after the name check.
    factories = {
        'CosineAnnealing': lambda: lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epochs),
        'CosineAnnealingWarmRestarts': lambda: lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2),
        'ReduceLROnPlateau': lambda: lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=10, factor=0.5),
        'StepLR': lambda: lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1),
        'ExponentialLR': lambda: lr_scheduler.ExponentialLR(optimizer, gamma=0.95),
    }

    if config.scheduler not in factories:
        raise ValueError(f"Unsupported scheduler: {config.scheduler}")
    return factories[config.scheduler]()
code/dataset.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pandas as pd
3
+ import torch
4
+ from torch.utils.data import Dataset
5
+ from PIL import Image
6
+
7
+ from utils.labels import Condition
8
+ from config.biomarker_config import FlexibleBiomarkerConfig
9
+
10
class ClassifierDataset(Dataset):
    """
    Dataset of grayscale images paired with multi-task biomarker targets.

    Expects ``data_path`` to contain the label CSV (``train.csv`` /
    ``val.csv`` or an explicit ``csv_file``) with a ``FILE`` column plus
    one column per configured biomarker, and a ``data/`` subdirectory of
    ``<FILE>.png`` images. Target vectors follow the layout produced by
    ``biomarker_config.get_tensor_layout()``.
    """
    def __init__(self, data_path, biomarker_config, transforms=None, size=256, train=True, csv_file=None):
        """
        Initialize data set.
        Loads the CSV, applies HIPAA age filtering, and pre-computes all
        target tensors.
        @param data_path : path to data and labels
        @param biomarker_config : FlexibleBiomarkerConfig object specifying which biomarkers to use
        @param transforms : optional transform applied to each image
        @param size : side length (pixels) each image is resized to
        @param train : load train (train.csv) or validation (val.csv) split
        @param csv_file : explicit CSV filename; overrides the train flag
        """
        if not os.path.exists(data_path):
            raise IOError('Path given for ClassifierDataset {} does not exist...'.format(data_path))
        self.data_path = data_path
        self.size = size
        self.biomarker_config = biomarker_config

        # Explicit csv_file takes precedence over the train/val choice.
        csv_name = csv_file if csv_file is not None else ('train.csv' if train else 'val.csv')
        self.df = pd.read_csv(os.path.join(data_path, csv_name))

        # Apply age filtering for HIPAA compliance
        self.df = self._filter_age_records()

        self.transforms = transforms

        # Get tensor layout for efficient indexing
        self.tensor_layout = self.biomarker_config.get_tensor_layout()

        # Pre-compute all target tensors for efficient access (needed for class weights)
        self.targets = self._prepare_all_targets()

        print(f"Biomarkers configured: {self.biomarker_config.get_all_biomarker_names()}")
        print(f"Total output tensor size: {self.biomarker_config.total_output_size}")

    def _filter_age_records(self):
        """
        Filter out records with AGE = "90+" for HIPAA compliance.
        Also drops non-numeric AGE values, verifies the remaining maximum
        age is <= 89, and prints a summary of what was removed.
        @return df : the filtered DataFrame (also stored back on self.df)
        """
        if 'AGE' not in self.df.columns:
            print("AGE column not found - skipping age filtering")
            return self.df

        original_count = len(self.df)

        # Filter out "90+" records
        age_90_plus_mask = self.df['AGE'] == '90+'
        age_90_plus_count = age_90_plus_mask.sum()

        if age_90_plus_count > 0:
            print(f"HIPAA Compliance: Filtering out {age_90_plus_count:,} records with AGE='90+'")
            self.df = self.df[~age_90_plus_mask].copy()

        # Convert remaining AGE values to numeric and verify max age is 89
        numeric_age_mask = pd.to_numeric(self.df['AGE'], errors='coerce').notna()
        if not numeric_age_mask.all():
            # Handle any non-numeric age values (shouldn't happen after filtering 90+)
            non_numeric_count = (~numeric_age_mask).sum()
            print(f"Found {non_numeric_count} non-numeric AGE values, filtering them out")
            self.df = self.df[numeric_age_mask].copy()

        # Convert to numeric and verify max age
        self.df['AGE'] = pd.to_numeric(self.df['AGE'], errors='coerce')

        if len(self.df) > 0:
            max_age = self.df['AGE'].max()
            min_age = self.df['AGE'].min()

            if max_age > 89:
                print(f"Warning: Maximum age is {max_age}, expected <= 89")
            else:
                print(f"Age range after filtering: {min_age:.0f} - {max_age:.0f} years")

        filtered_count = len(self.df)
        removed_count = original_count - filtered_count

        if removed_count > 0:
            print(f"Dataset filtering summary:")
            print(f" Original records: {original_count:,}")
            print(f" Removed records: {removed_count:,}")
            print(f" Remaining records: {filtered_count:,}")
            print(f" Removal rate: {removed_count/original_count*100:.1f}%")

        return self.df

    def __len__(self):
        """
        Get length of dataset
        @return len : number of records remaining after age filtering
        """
        return self.df.shape[0]

    def _prepare_all_targets(self):
        """
        Pre-compute all target tensors for the dataset.

        Builds one float32 vector of length
        ``biomarker_config.total_output_size`` per row: binary biomarkers
        become a single 0/1 entry, multiclass biomarkers a one-hot span,
        and continuous biomarkers a normalized scalar.
        @return targets : numpy array [num_rows, total_output_size]
        """
        import numpy as np

        targets = []
        for idx in range(len(self.df)):
            data = self.df.iloc[idx]

            # Create tensor with the configured size
            t = torch.zeros(self.biomarker_config.total_output_size, dtype=torch.float32)

            # Process binary biomarkers
            for biomarker in self.biomarker_config.binary_biomarkers:
                if biomarker.name in data:
                    layout = self.tensor_layout[biomarker.name]
                    idx_start = layout.start_idx

                    # Convert using configured classes or default Condition enum
                    if biomarker.positive_class == "PRESENT" and biomarker.negative_class == "ABSENT":
                        # Use default Condition converter
                        t[idx_start] = Condition.convert(data[biomarker.name])
                    else:
                        # Use custom class mapping
                        if data[biomarker.name] == biomarker.positive_class:
                            t[idx_start] = 1.0
                        elif data[biomarker.name] == biomarker.negative_class:
                            t[idx_start] = 0.0
                        else:
                            # Default to negative class for unknown values
                            t[idx_start] = 0.0

            # Process multiclass biomarkers
            for biomarker in self.biomarker_config.multiclass_biomarkers:
                if biomarker.name in data:
                    layout = self.tensor_layout[biomarker.name]
                    idx_start = layout.start_idx

                    # Get class index and create one-hot encoding
                    try:
                        class_idx = biomarker.class_to_index(data[biomarker.name])
                        t[idx_start + class_idx] = 1.0
                    except ValueError:
                        # Default to first class if unknown value
                        print(f"Warning: Unknown value '{data[biomarker.name]}' for {biomarker.name}, using first class")
                        t[idx_start] = 1.0

            # Process continuous biomarkers
            for biomarker in self.biomarker_config.continuous_biomarkers:
                if biomarker.name in data:
                    layout = self.tensor_layout[biomarker.name]
                    idx_start = layout.start_idx

                    # Normalize the continuous value
                    raw_value = float(data[biomarker.name])
                    normalized_value = biomarker.normalize(raw_value)
                    t[idx_start] = normalized_value

            targets.append(t.numpy())

        return np.array(targets)

    def __getitem__(self, idx):
        """
        Gets data at a certain index
        @param idx : idx of data desired
        @return xray : grayscale image (resized to size x size) at idx
        @return tensor : tensor of biomarker values at idx
        """
        data = self.df.iloc[idx]

        # Get pre-computed targets
        t = torch.tensor(self.targets[idx], dtype=torch.float32)

        # Load and process image; the FILE column holds the image name
        # without its '.png' extension.
        xray = Image.open(os.path.join(self.data_path, 'data', data['FILE'] + '.png'))
        xray = xray.resize((self.size, self.size), Image.LANCZOS)
        xray = xray.convert('L')
        if self.transforms:
            xray = self.transforms(xray)

        return xray, t

    def at(self,idx):
        """
        Gets the study name for a certain index
        @param idx : idx of data directory desired
        @return name : FILE value at idx, truncated at the first '.'
        """
        # NOTE(review): splitting on '.' truncates FILE values that contain
        # dots; __getitem__ appends '.png' to the raw FILE value, which
        # suggests FILE normally carries no extension — confirm.
        return self.df.iloc[idx]['FILE'].split('.')[0]
193
+
194
+
195
class PredictionDataset(Dataset):
    """Prediction-only dataset: serves images from a directory, no labels.

    Scans ``data_path`` once for files with a recognized image extension
    (case-insensitive) and exposes them in sorted filename order.
    """

    def __init__(self, data_path, transforms=None, size=256):
        if not os.path.exists(data_path):
            raise IOError(f'Path given for PredictionDataset {data_path} does not exist...')
        self.data_path = data_path
        # Case-insensitive extension filter over the directory listing.
        valid_exts = (".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff", ".webp")
        candidates = os.listdir(data_path)
        self.data = sorted(name for name in candidates if name.lower().endswith(valid_exts))
        self.size = size
        self.transforms = transforms

    def __len__(self):
        """Number of images discovered in the directory."""
        return len(self.data)

    def __getitem__(self, idx):
        """Load, resize, grayscale-convert, and optionally transform image idx."""
        image = Image.open(os.path.join(self.data_path, self.data[idx]))
        image = image.resize((self.size, self.size), Image.LANCZOS).convert('L')
        if self.transforms:
            image = self.transforms(image)
        return image

    def at(self, idx):
        """Return the filename (with extension) served at index idx."""
        return self.data[idx]
224
+
225
if __name__ == "__main__":
    # Manual smoke test: load the dataset, inspect one sample and one batch.
    # NOTE(review): ClassifierDataset.__init__ requires biomarker_config as
    # its second positional argument, so ClassifierDataset('data') raises
    # TypeError as written — pass a FlexibleBiomarkerConfig instance to
    # restore this smoke test.
    from torch.utils.data import DataLoader
    c = ClassifierDataset('data')
    print(len(c))
    print(c[0][0].shape)
    print(c[0][1].shape)
    print(c[0][1])
    print(c.at(2))
    data = DataLoader(c, batch_size=4, shuffle=True)
    # Print shapes/targets for the four items of a single batch, then stop.
    for s in data:
        print(s[0][0].shape, s[1][0], s[1][0].shape)
        print(s[0][1].shape, s[1][1], s[1][1].shape)
        print(s[0][2].shape, s[1][2], s[1][2].shape)
        print(s[0][3].shape, s[1][3], s[1][3].shape)
        break
code/model/flexible_multitask_head.py ADDED
@@ -0,0 +1,686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Flexible Multi-Task Head and Loss Function
3
+ Adapts to any biomarker configuration without hardcoded assumptions
4
+ """
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import numpy as np
10
+ from typing import Dict, List, Tuple, Any, Union, Optional
11
+ import logging
12
+ from config.biomarker_config import FlexibleBiomarkerConfig, TensorLayout
13
+ from .single_target_strategies import (
14
+ SingleTargetStrategy,
15
+ create_feature_extractor,
16
+ get_strategy_from_name
17
+ )
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
class FlexibleMultiTaskHead(nn.Module):
    """Flexible multi-task head that adapts to biomarker configuration.

    Emits one logit per binary biomarker, a logit vector per multiclass
    biomarker, and one value per continuous biomarker, concatenated in the
    order binary -> multiclass -> continuous so the output matches the
    layout from ``biomarker_config.get_tensor_layout()``.
    """

    def __init__(
        self,
        input_dim: int,
        biomarker_config: FlexibleBiomarkerConfig,
        dropout: float = 0.1,
        single_target_strategy: Optional[Union[str, SingleTargetStrategy]] = None,
        target_feature_dim: Optional[int] = None
    ):
        """
        Args:
            input_dim: Dimension of the backbone features fed to the head.
            biomarker_config: Defines the biomarkers (and thus the heads).
            dropout: Dropout used in the shared trunk / feature extractor.
            single_target_strategy: Optional strategy (name or instance)
                whose feature extractor replaces the shared trunk.
            target_feature_dim: Optional override for the feature dimension
                (extractor feature_dim, or shared-trunk width; default 512).
        """
        super().__init__()

        self.biomarker_config = biomarker_config
        self.tensor_layout = biomarker_config.get_tensor_layout()

        self.single_target_strategy = None
        self.feature_extractor = None
        self.target_feature_dim = target_feature_dim

        # Fix: processed_input_dim was previously computed by two
        # consecutive if/else blocks on the same condition; the first
        # assignment was always overwritten (dead code). The two blocks
        # are merged into a single branch with identical behavior.
        if single_target_strategy is not None:
            if isinstance(single_target_strategy, str):
                self.single_target_strategy = get_strategy_from_name(single_target_strategy)
            else:
                self.single_target_strategy = single_target_strategy

            # Create appropriate feature extractor with target feature dimension
            feature_dim = target_feature_dim if target_feature_dim is not None else input_dim
            self.feature_extractor = create_feature_extractor(
                self.single_target_strategy,
                input_dim,
                feature_dim=feature_dim,
                dropout=dropout
            )

            # The strategy's extractor supplies the features; skip the trunk.
            self.shared_layers = nn.Identity()
            processed_input_dim = self.feature_extractor.output_dim
        else:
            # Shared feature-processing trunk (default path).
            output_dim = target_feature_dim if target_feature_dim is not None else 512
            self.shared_layers = nn.Sequential(
                nn.Linear(input_dim, output_dim),
                nn.ReLU(inplace=True),
                nn.Dropout(dropout),
                nn.BatchNorm1d(output_dim)
            )
            processed_input_dim = output_dim

        # Task-specific heads, keyed "<task_type>_<biomarker_name>".
        self.task_heads = nn.ModuleDict()

        # Binary classification heads (one logit per biomarker)
        for biomarker in biomarker_config.binary_biomarkers:
            self.task_heads[f"binary_{biomarker.name}"] = nn.Linear(processed_input_dim, 1)

        # Multiclass classification heads (one logit per class)
        for biomarker in biomarker_config.multiclass_biomarkers:
            num_classes = len(biomarker.classes)
            self.task_heads[f"multiclass_{biomarker.name}"] = nn.Linear(processed_input_dim, num_classes)

        # Regression heads (one scalar per biomarker)
        for biomarker in biomarker_config.continuous_biomarkers:
            self.task_heads[f"continuous_{biomarker.name}"] = nn.Linear(processed_input_dim, 1)

    def forward(self, x):
        """
        Forward pass

        Args:
            x: Input features [batch_size, input_dim] or [batch_size, C, H, W]
               for spatial features (handled by the strategy extractor)

        Returns:
            Concatenated outputs [batch_size, total_output_size], ordered
            binary -> multiclass -> continuous to match the tensor layout
        """
        # Apply single-target strategy feature extraction if specified
        if self.single_target_strategy is not None and self.feature_extractor is not None:
            shared_features = self.feature_extractor(x)
        else:
            shared_features = self.shared_layers(x)

        outputs = []

        # Binary outputs
        for biomarker in self.biomarker_config.binary_biomarkers:
            head_name = f"binary_{biomarker.name}"
            binary_out = self.task_heads[head_name](shared_features)  # [B, 1]
            outputs.append(binary_out)

        # Multiclass outputs
        for biomarker in self.biomarker_config.multiclass_biomarkers:
            head_name = f"multiclass_{biomarker.name}"
            multiclass_out = self.task_heads[head_name](shared_features)  # [B, num_classes]
            outputs.append(multiclass_out)

        # Regression outputs
        for biomarker in self.biomarker_config.continuous_biomarkers:
            head_name = f"continuous_{biomarker.name}"
            regression_out = self.task_heads[head_name](shared_features)  # [B, 1]
            outputs.append(regression_out)

        # Concatenate all outputs
        return torch.cat(outputs, dim=1)
134
+
135
+
136
class LinearProbeMultiTaskHead(nn.Module):
    """
    True linear probe head - direct mapping from backbone features to task outputs
    No shared layers, minimal parameters, maximum interpretability

    Output ordering matches FlexibleMultiTaskHead: binary -> multiclass ->
    continuous, concatenated to total_output_size.
    """

    def __init__(
        self,
        input_dim: int,
        biomarker_config: FlexibleBiomarkerConfig,
        dropout: float = 0.0,
        single_target_strategy: Optional[Union[str, SingleTargetStrategy]] = None,
        target_feature_dim: Optional[int] = None
    ):
        """
        Args:
            input_dim: Dimension of the backbone features.
            biomarker_config: Defines the biomarkers (and thus the heads).
            dropout: Optional dropout on the backbone features; kept at 0.0
                for a true linear probe.
            single_target_strategy: Optional strategy (name or instance)
                whose feature extractor preprocesses the features.
            target_feature_dim: Optional feature-dimension override for the
                strategy's extractor.
        """
        super().__init__()

        self.biomarker_config = biomarker_config
        self.tensor_layout = biomarker_config.get_tensor_layout()

        # Handle single-target strategy
        self.single_target_strategy = None
        self.feature_extractor = None
        self.target_feature_dim = target_feature_dim

        if single_target_strategy is not None:
            if isinstance(single_target_strategy, str):
                self.single_target_strategy = get_strategy_from_name(single_target_strategy)
            else:
                self.single_target_strategy = single_target_strategy

            # Create appropriate feature extractor with target feature dimension
            feature_dim = target_feature_dim if target_feature_dim is not None else input_dim
            self.feature_extractor = create_feature_extractor(
                self.single_target_strategy,
                input_dim,
                feature_dim=feature_dim,
                dropout=0.0  # No dropout for linear probe
            )

            # Use feature extractor output dimension
            processed_input_dim = self.feature_extractor.output_dim
        else:
            # Default behavior - use original input dimension
            processed_input_dim = input_dim

        # Optional minimal dropout (usually 0.0 for true linear probe).
        # Note: only applied on the non-strategy path in forward().
        self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()

        # Direct task-specific linear layers (no shared processing)
        self.task_heads = nn.ModuleDict()

        # Binary classification heads - direct from backbone
        for biomarker in biomarker_config.binary_biomarkers:
            self.task_heads[f"binary_{biomarker.name}"] = nn.Linear(processed_input_dim, 1)

        # Multiclass classification heads - direct from backbone
        for biomarker in biomarker_config.multiclass_biomarkers:
            num_classes = len(biomarker.classes)
            self.task_heads[f"multiclass_{biomarker.name}"] = nn.Linear(processed_input_dim, num_classes)

        # Regression heads - direct from backbone
        for biomarker in biomarker_config.continuous_biomarkers:
            self.task_heads[f"continuous_{biomarker.name}"] = nn.Linear(processed_input_dim, 1)

        # Initialize weights for better convergence
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize linear layer weights for stable training.

        Xavier-uniform weights and zero biases on every task head.
        """
        for name, module in self.task_heads.items():
            if isinstance(module, nn.Linear):
                # Xavier/Glorot initialization for linear layers
                nn.init.xavier_uniform_(module.weight)
                nn.init.zeros_(module.bias)

    def forward(self, x):
        """
        Direct forward pass - no shared processing

        Args:
            x: Backbone features [batch_size, input_dim] or [batch_size, C, H, W] for spatial features

        Returns:
            Concatenated outputs [batch_size, total_output_size], ordered
            binary -> multiclass -> continuous per the tensor layout
        """
        # Apply single-target strategy feature extraction if specified;
        # the dropout module is intentionally bypassed on this path.
        if self.single_target_strategy is not None and self.feature_extractor is not None:
            features = self.feature_extractor(x)
        else:
            # Optional dropout on backbone features (usually disabled)
            features = self.dropout(x)  # [batch_size, input_dim]

        outputs = []

        # Binary outputs - direct linear transformation
        for biomarker in self.biomarker_config.binary_biomarkers:
            head_name = f"binary_{biomarker.name}"
            binary_out = self.task_heads[head_name](features)  # [B, 1]
            outputs.append(binary_out)

        # Multiclass outputs - direct linear transformation
        for biomarker in self.biomarker_config.multiclass_biomarkers:
            head_name = f"multiclass_{biomarker.name}"
            multiclass_out = self.task_heads[head_name](features)  # [B, num_classes]
            outputs.append(multiclass_out)

        # Regression outputs - direct linear transformation
        for biomarker in self.biomarker_config.continuous_biomarkers:
            head_name = f"continuous_{biomarker.name}"
            regression_out = self.task_heads[head_name](features)  # [B, 1]
            outputs.append(regression_out)

        # Concatenate all outputs
        return torch.cat(outputs, dim=1)  # [batch_size, total_output_size]
250
+
251
+
252
class FlexibleMultiTaskLoss(nn.Module):
    """Flexible multi-task loss function that adapts to biomarker configuration.

    Per-biomarker losses: BCEWithLogits (optionally pos-weighted) for binary
    tasks, CrossEntropy for multiclass tasks, MSE for continuous tasks.
    The total loss is the sum of the three per-task-type averages.
    """

    def __init__(self, biomarker_config: FlexibleBiomarkerConfig, class_weights: Dict[str, float] = None):
        """
        Args:
            biomarker_config: Defines the biomarkers and the tensor layout
                used to slice predictions/targets.
            class_weights: Optional mapping biomarker name -> positive-class
                weight for binary BCE losses; 1.0 means unweighted.
        """
        super().__init__()

        self.biomarker_config = biomarker_config
        self.tensor_layout = biomarker_config.get_tensor_layout()
        self.class_weights = class_weights or {}

        # Create weighted BCE losses for binary tasks
        self.binary_losses = nn.ModuleDict()
        for biomarker in biomarker_config.binary_biomarkers:
            weight = self.class_weights.get(biomarker.name, 1.0)
            if weight != 1.0:
                pos_weight = torch.tensor([weight], dtype=torch.float32)
                self.binary_losses[biomarker.name] = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
            else:
                self.binary_losses[biomarker.name] = nn.BCEWithLogitsLoss()

        # Cross-entropy losses for multiclass tasks
        self.multiclass_losses = nn.ModuleDict()
        for biomarker in biomarker_config.multiclass_biomarkers:
            self.multiclass_losses[biomarker.name] = nn.CrossEntropyLoss()

        # MSE losses for regression tasks
        self.regression_losses = nn.ModuleDict()
        for biomarker in biomarker_config.continuous_biomarkers:
            self.regression_losses[biomarker.name] = nn.MSELoss()

    def forward(self, predictions: torch.Tensor, targets: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, float]]:
        """
        Calculate multi-task loss

        Args:
            predictions: [batch_size, total_output_size]
            targets: [batch_size, total_output_size] (multiclass spans are
                one-hot; converted to class indices for CrossEntropy)

        Returns:
            total_loss: Combined loss
            loss_dict: Dictionary with individual loss components

        NOTE(review): if no biomarkers are configured, total_loss stays a
        plain float and the final .item() call would fail — assumes at
        least one task exists; confirm the config guarantees this.
        """
        device = predictions.device

        # Move pos_weight tensors to correct device (mutates loss modules
        # in place on every call; cheap no-op once already on device).
        for biomarker_name, loss_fn in self.binary_losses.items():
            if hasattr(loss_fn, 'pos_weight') and loss_fn.pos_weight is not None:
                loss_fn.pos_weight = loss_fn.pos_weight.to(device)

        total_loss = 0.0
        loss_components = {}

        # Binary classification losses
        binary_losses = []
        for biomarker in self.biomarker_config.binary_biomarkers:
            layout = self.tensor_layout[biomarker.name]

            pred_slice = predictions[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]
            target_slice = targets[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]

            loss_fn = self.binary_losses[biomarker.name]
            binary_loss = loss_fn(pred_slice, target_slice)
            binary_losses.append(binary_loss)

            loss_components[f'binary_{biomarker.name}'] = binary_loss.item()

        if binary_losses:
            avg_binary_loss = torch.mean(torch.stack(binary_losses))
            total_loss += avg_binary_loss
            loss_components['avg_binary_loss'] = avg_binary_loss.item()

        # Multiclass classification losses
        multiclass_losses = []
        for biomarker in self.biomarker_config.multiclass_biomarkers:
            layout = self.tensor_layout[biomarker.name]

            pred_slice = predictions[:, layout.start_idx:layout.end_idx]  # [B, num_classes]
            target_slice = targets[:, layout.start_idx:layout.end_idx]  # [B, num_classes]

            # Convert one-hot targets to class indices
            target_indices = torch.argmax(target_slice, dim=1)  # [B]

            loss_fn = self.multiclass_losses[biomarker.name]
            multiclass_loss = loss_fn(pred_slice, target_indices)
            multiclass_losses.append(multiclass_loss)

            loss_components[f'multiclass_{biomarker.name}'] = multiclass_loss.item()

        if multiclass_losses:
            avg_multiclass_loss = torch.mean(torch.stack(multiclass_losses))
            total_loss += avg_multiclass_loss
            loss_components['avg_multiclass_loss'] = avg_multiclass_loss.item()

        # Regression losses
        regression_losses = []
        for biomarker in self.biomarker_config.continuous_biomarkers:
            layout = self.tensor_layout[biomarker.name]

            pred_slice = predictions[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]
            target_slice = targets[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]

            loss_fn = self.regression_losses[biomarker.name]
            regression_loss = loss_fn(pred_slice, target_slice)
            regression_losses.append(regression_loss)

            loss_components[f'regression_{biomarker.name}'] = regression_loss.item()

        if regression_losses:
            avg_regression_loss = torch.mean(torch.stack(regression_losses))
            total_loss += avg_regression_loss
            loss_components['avg_regression_loss'] = avg_regression_loss.item()

        loss_components['total_loss'] = total_loss.item()

        return total_loss, loss_components
367
+
368
+
369
+ class FlexibleMetricsCalculator:
370
+ """Calculate comprehensive metrics for flexible multi-task learning"""
371
+
372
    def __init__(self, biomarker_config: FlexibleBiomarkerConfig):
        """Read threshold-optimization settings from the configuration.

        All settings come from ``biomarker_config.validation`` (a
        dict-like ``get`` interface) with conservative fallbacks.

        @param biomarker_config : defines biomarkers, tensor layout, and
            the validation settings
        """
        self.biomarker_config = biomarker_config
        self.tensor_layout = biomarker_config.get_tensor_layout()

        # Threshold optimization settings from config
        validation_config = biomarker_config.validation
        self.threshold_optimization = validation_config.get('threshold_optimization', False)
        self.optimization_metric = validation_config.get('optimization_metric', 'f1_score')
        self.per_biomarker_thresholds = validation_config.get('per_biomarker_thresholds', True)
        self.threshold_range = validation_config.get('threshold_search_range', [0.1, 0.9])
        self.threshold_steps = validation_config.get('threshold_search_steps', 81)
        self.fallback_threshold = validation_config.get('fallback_threshold', 0.5)

        # Store optimal thresholds per biomarker
        self.optimal_thresholds = {}
387
+
388
+ def find_optimal_threshold(self, pred_probs: np.ndarray, true_labels: np.ndarray,
389
+ metric: str = 'f1_score') -> Tuple[float, float]:
390
+ """
391
+ Find optimal threshold for a single biomarker based on specified metric
392
+
393
+ Args:
394
+ pred_probs: Predicted probabilities [N]
395
+ true_labels: True binary labels [N]
396
+ metric: Metric to optimize ('f1_score', 'sensitivity', 'specificity', etc.)
397
+
398
+ Returns:
399
+ best_threshold: Optimal threshold value
400
+ best_score: Best metric score achieved
401
+ """
402
+ from sklearn.metrics import f1_score, precision_score, recall_score
403
+
404
+ # Create threshold search space
405
+ thresholds = np.linspace(self.threshold_range[0], self.threshold_range[1], self.threshold_steps)
406
+
407
+ best_threshold = self.fallback_threshold
408
+ best_score = 0.0
409
+
410
+ for threshold in thresholds:
411
+ pred_labels = (pred_probs > threshold).astype(int)
412
+
413
+ try:
414
+ if metric == 'f1_score':
415
+ score = f1_score(true_labels, pred_labels, zero_division=0.0)
416
+ elif metric == 'precision':
417
+ score = precision_score(true_labels, pred_labels, zero_division=0.0)
418
+ elif metric == 'recall' or metric == 'sensitivity':
419
+ score = recall_score(true_labels, pred_labels, zero_division=0.0)
420
+ elif metric == 'specificity':
421
+ # Specificity = TN / (TN + FP)
422
+ tn = np.sum((pred_labels == 0) & (true_labels == 0))
423
+ fp = np.sum((pred_labels == 1) & (true_labels == 0))
424
+ score = tn / (tn + fp + 1e-8)
425
+ elif metric == 'accuracy':
426
+ score = (pred_labels == true_labels).mean()
427
+ else:
428
+ # Default to f1_score
429
+ score = f1_score(true_labels, pred_labels, zero_division=0.0)
430
+
431
+ if score > best_score:
432
+ best_score = score
433
+ best_threshold = threshold
434
+
435
+ except (ValueError, ZeroDivisionError):
436
+ continue
437
+
438
+ return best_threshold, best_score
439
+
440
+ def optimize_thresholds(self, predictions: torch.Tensor, targets: torch.Tensor) -> Dict[str, float]:
441
+ """
442
+ Find optimal thresholds for all binary biomarkers
443
+
444
+ Args:
445
+ predictions: Model predictions [batch_size, total_output_size]
446
+ targets: True targets [batch_size, total_output_size]
447
+
448
+ Returns:
449
+ Dictionary mapping biomarker names to optimal thresholds
450
+ """
451
+ # Convert to numpy
452
+ if isinstance(predictions, torch.Tensor):
453
+ predictions = predictions.detach().cpu().numpy()
454
+ if isinstance(targets, torch.Tensor):
455
+ targets = targets.detach().cpu().numpy()
456
+
457
+ optimal_thresholds = {}
458
+
459
+ for biomarker in self.biomarker_config.binary_biomarkers:
460
+ layout = self.tensor_layout[biomarker.name]
461
+
462
+ pred_logits = predictions[:, layout.start_idx]
463
+ pred_probs = 1 / (1 + np.exp(-pred_logits)) # Sigmoid
464
+ true_labels = targets[:, layout.start_idx].astype(int)
465
+
466
+ # Skip if all labels are the same (no positive or negative examples)
467
+ if len(np.unique(true_labels)) < 2:
468
+ optimal_thresholds[biomarker.name] = self.fallback_threshold
469
+ continue
470
+
471
+ # Find optimal threshold
472
+ best_threshold, best_score = self.find_optimal_threshold(
473
+ pred_probs, true_labels, self.optimization_metric
474
+ )
475
+
476
+ optimal_thresholds[biomarker.name] = best_threshold
477
+
478
+ # Log the optimization result
479
+ logger.info(
480
+ " %s: threshold=%.3f, %s=%.3f",
481
+ biomarker.name,
482
+ best_threshold,
483
+ self.optimization_metric,
484
+ best_score,
485
+ )
486
+
487
+ return optimal_thresholds
488
+
489
+ def update_optimal_thresholds(self, predictions: torch.Tensor, targets: torch.Tensor):
490
+ """Update optimal thresholds based on current predictions and targets"""
491
+ if self.threshold_optimization:
492
+ logger.info("Optimizing thresholds...")
493
+ self.optimal_thresholds = self.optimize_thresholds(predictions, targets)
494
+ else:
495
+ # Use fallback threshold for all biomarkers
496
+ for biomarker in self.biomarker_config.binary_biomarkers:
497
+ self.optimal_thresholds[biomarker.name] = self.fallback_threshold
498
+
499
+ def calculate_binary_metrics(self, predictions: torch.Tensor, targets: torch.Tensor,
500
+ use_optimal_thresholds: bool = True) -> Dict[str, Dict[str, float]]:
501
+ """Calculate metrics for binary classification tasks"""
502
+ metrics = {}
503
+
504
+ # Convert to numpy
505
+ if isinstance(predictions, torch.Tensor):
506
+ predictions = predictions.detach().cpu().numpy()
507
+ if isinstance(targets, torch.Tensor):
508
+ targets = targets.detach().cpu().numpy()
509
+
510
+ for biomarker in self.biomarker_config.binary_biomarkers:
511
+ layout = self.tensor_layout[biomarker.name]
512
+
513
+ pred_logits = predictions[:, layout.start_idx]
514
+ pred_probs = 1 / (1 + np.exp(-pred_logits)) # Sigmoid
515
+ true_labels = targets[:, layout.start_idx].astype(int)
516
+
517
+ # AUROC (threshold-independent)
518
+ try:
519
+ from sklearn.metrics import roc_auc_score
520
+ auroc = roc_auc_score(true_labels, pred_probs)
521
+ except (ValueError, ImportError):
522
+ auroc = 0.0
523
+
524
+ # Get threshold for this biomarker
525
+ if use_optimal_thresholds and biomarker.name in self.optimal_thresholds:
526
+ threshold = self.optimal_thresholds[biomarker.name]
527
+ else:
528
+ threshold = self.fallback_threshold
529
+
530
+ # Predictions with threshold
531
+ pred_labels = (pred_probs > threshold).astype(int)
532
+
533
+ # Basic metrics
534
+ accuracy = (pred_labels == true_labels).mean()
535
+
536
+ # Confusion matrix components
537
+ true_positives = np.sum((pred_labels == 1) & (true_labels == 1))
538
+ true_negatives = np.sum((pred_labels == 0) & (true_labels == 0))
539
+ false_positives = np.sum((pred_labels == 1) & (true_labels == 0))
540
+ false_negatives = np.sum((pred_labels == 0) & (true_labels == 1))
541
+
542
+ # Sensitivity and Specificity
543
+ sensitivity = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0.0
544
+ specificity = true_negatives / (true_negatives + false_positives) if (true_negatives + false_positives) > 0 else 0.0
545
+
546
+ # F1 Score
547
+ try:
548
+ from sklearn.metrics import f1_score
549
+ f1 = f1_score(true_labels, pred_labels, zero_division=0.0)
550
+ except ImportError:
551
+ precision = true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0.0
552
+ recall = sensitivity
553
+ f1 = 2 * (precision * recall) / (precision + recall + 1e-8)
554
+
555
+ metrics[biomarker.name] = {
556
+ 'auroc': float(auroc),
557
+ 'accuracy': float(accuracy),
558
+ 'sensitivity': float(sensitivity),
559
+ 'specificity': float(specificity),
560
+ 'f1': float(f1),
561
+ 'threshold': float(threshold), # Include threshold used
562
+ 'true_positives': int(true_positives),
563
+ 'true_negatives': int(true_negatives),
564
+ 'false_positives': int(false_positives),
565
+ 'false_negatives': int(false_negatives)
566
+ }
567
+
568
+ return metrics
569
+
570
+ def calculate_multiclass_metrics(self, predictions: torch.Tensor, targets: torch.Tensor) -> Dict[str, Dict[str, Any]]:
571
+ """Calculate metrics for multiclass classification tasks"""
572
+ metrics = {}
573
+
574
+ # Convert to numpy
575
+ if isinstance(predictions, torch.Tensor):
576
+ predictions = predictions.detach().cpu().numpy()
577
+ if isinstance(targets, torch.Tensor):
578
+ targets = targets.detach().cpu().numpy()
579
+
580
+ for biomarker in self.biomarker_config.multiclass_biomarkers:
581
+ layout = self.tensor_layout[biomarker.name]
582
+
583
+ pred_logits = predictions[:, layout.start_idx:layout.end_idx]
584
+ # Numerically stable softmax
585
+ max_logits = np.max(pred_logits, axis=1, keepdims=True)
586
+ exp_logits = np.exp(pred_logits - max_logits)
587
+ pred_probs = exp_logits / (np.sum(exp_logits, axis=1, keepdims=True) + 1e-12)
588
+ target_one_hot = targets[:, layout.start_idx:layout.end_idx]
589
+
590
+ # Get predicted and true classes
591
+ pred_classes = np.argmax(pred_probs, axis=1)
592
+ true_classes = np.argmax(target_one_hot, axis=1)
593
+
594
+ # Overall accuracy
595
+ accuracy = (pred_classes == true_classes).mean()
596
+
597
+ # Multi-class AUROC
598
+ try:
599
+ from sklearn.metrics import roc_auc_score
600
+ auroc = roc_auc_score(target_one_hot, pred_probs, multi_class='ovr', average='macro')
601
+ except (ValueError, ImportError):
602
+ auroc = 0.0
603
+
604
+ metrics[biomarker.name] = {
605
+ 'accuracy': float(accuracy),
606
+ 'auroc': float(auroc)
607
+ }
608
+
609
+ return metrics
610
+
611
+ def calculate_regression_metrics(self, predictions: torch.Tensor, targets: torch.Tensor) -> Dict[str, Dict[str, float]]:
612
+ """Calculate metrics for regression tasks"""
613
+ metrics = {}
614
+
615
+ # Convert to numpy
616
+ if isinstance(predictions, torch.Tensor):
617
+ predictions = predictions.detach().cpu().numpy()
618
+ if isinstance(targets, torch.Tensor):
619
+ targets = targets.detach().cpu().numpy()
620
+
621
+ for biomarker in self.biomarker_config.continuous_biomarkers:
622
+ layout = self.tensor_layout[biomarker.name]
623
+
624
+ pred_values = predictions[:, layout.start_idx]
625
+ true_values = targets[:, layout.start_idx]
626
+
627
+ # Denormalize if needed
628
+ if biomarker.normalization == "min_max":
629
+ pred_denorm = pred_values * (biomarker.max_value - biomarker.min_value) + biomarker.min_value
630
+ true_denorm = true_values * (biomarker.max_value - biomarker.min_value) + biomarker.min_value
631
+ else:
632
+ pred_denorm = pred_values
633
+ true_denorm = true_values
634
+
635
+ # Calculate metrics
636
+ mse = np.mean((pred_denorm - true_denorm) ** 2)
637
+ mae = np.mean(np.abs(pred_denorm - true_denorm))
638
+
639
+ # R² score
640
+ ss_res = np.sum((true_denorm - pred_denorm) ** 2)
641
+ ss_tot = np.sum((true_denorm - np.mean(true_denorm)) ** 2)
642
+ r2 = 1 - (ss_res / (ss_tot + 1e-8))
643
+
644
+ metrics[biomarker.name] = {
645
+ 'mse': float(mse),
646
+ 'mae': float(mae),
647
+ 'r2': float(r2)
648
+ }
649
+
650
+ return metrics
651
+
652
+ def calculate_all_metrics(self, predictions: torch.Tensor, targets: torch.Tensor) -> Dict[str, Any]:
653
+ """Calculate all metrics"""
654
+ all_metrics = {}
655
+
656
+ # Binary metrics
657
+ if self.biomarker_config.binary_biomarkers:
658
+ binary_metrics = self.calculate_binary_metrics(predictions, targets)
659
+ all_metrics.update(binary_metrics)
660
+
661
+ # Multiclass metrics
662
+ if self.biomarker_config.multiclass_biomarkers:
663
+ multiclass_metrics = self.calculate_multiclass_metrics(predictions, targets)
664
+ all_metrics.update(multiclass_metrics)
665
+
666
+ # Regression metrics
667
+ if self.biomarker_config.continuous_biomarkers:
668
+ regression_metrics = self.calculate_regression_metrics(predictions, targets)
669
+ all_metrics.update(regression_metrics)
670
+
671
+ # Calculate both average and median AUROC for comprehensive monitoring
672
+ auroc_values = []
673
+ for biomarker_name, metrics in all_metrics.items():
674
+ if isinstance(metrics, dict) and 'auroc' in metrics:
675
+ auroc_values.append(metrics['auroc'])
676
+
677
+ if auroc_values:
678
+ all_metrics['average_auroc'] = float(np.mean(auroc_values))
679
+ all_metrics['median_auroc'] = float(np.median(auroc_values))
680
+ else:
681
+ all_metrics['average_auroc'] = 0.0
682
+ all_metrics['median_auroc'] = 0.0
683
+
684
+ return all_metrics
685
+
686
+
code/model/gradnorm_loss.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ GradNorm Implementation for Multi-Task Loss Balancing
3
+ Based on "GradNorm: Gradient Normalization for Adaptive Loss Balancing in Deep Multitask Networks"
4
+ Chen et al., 2018 (https://arxiv.org/abs/1711.02257)
5
+ """
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from typing import Any, Dict, List, Tuple, Optional
11
+ import numpy as np
12
+ from collections import deque
13
+ import logging
14
+
15
+ from config.biomarker_config import FlexibleBiomarkerConfig
16
+ from model.flexible_multitask_head import FlexibleMultiTaskLoss
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
class GradNormLoss(nn.Module):
    """
    GradNorm-based multi-task loss balancing.

    This implementation extends the FlexibleMultiTaskLoss with GradNorm algorithm
    for automatic loss balancing based on gradient magnitudes.

    NOTE(review): the weight update in ``_update_task_weights`` is a simplified
    variant that uses loss *ratios* rather than gradient norms (see comment
    there), so this is GradNorm-inspired rather than the exact paper algorithm.
    """

    def __init__(
        self,
        biomarker_config: FlexibleBiomarkerConfig,
        class_weights: Dict[str, float] = None,
        alpha: float = 0.16,
        initial_task_loss_average_window: int = 20,
        update_weights_every: int = 10,
        normalize_losses: bool = True,
        restoring_force_factor: float = 0.1
    ):
        """
        Initialize GradNorm loss.

        Args:
            biomarker_config: Configuration for biomarkers and tasks
            class_weights: Initial class weights for individual losses
            alpha: Restoring force strength (typically 0.12-0.16)
            initial_task_loss_average_window: Window size for computing initial task loss averages
            update_weights_every: Update loss weights every N iterations
            normalize_losses: Whether to normalize individual losses
            restoring_force_factor: Factor for the restoring force strength
        """
        super().__init__()

        self.biomarker_config = biomarker_config
        self.alpha = alpha
        self.update_weights_every = update_weights_every
        self.normalize_losses = normalize_losses
        self.restoring_force_factor = restoring_force_factor
        self.initial_task_loss_average_window = initial_task_loss_average_window

        # Create the base multi-task loss (provides the per-task loss functions).
        self.base_loss = FlexibleMultiTaskLoss(biomarker_config, class_weights)

        # Get task information; order here fixes the index order of task_weights.
        self.task_names = self._get_task_names()
        self.num_tasks = len(self.task_names)

        # Initialize task weights (learnable parameters).
        # NOTE(review): updated in-place via .data, not by an optimizer.
        initial_weights = torch.ones(self.num_tasks, dtype=torch.float32)
        self.task_weights = nn.Parameter(initial_weights)

        # Tracking variables
        self.step_count = 0
        # Rolling windows of the first N per-task loss values, used to form baselines.
        self.initial_task_losses = {task: deque(maxlen=initial_task_loss_average_window)
                                    for task in self.task_names}
        self.initial_losses_computed = False
        self.task_loss_averages = None

        # For debugging and monitoring
        self.weight_history = []
        self.loss_ratio_history = []

    def to(self, device):
        """Move GradNorm loss to device.

        Also moves the cached baseline tensor, which ``Module.to`` would not
        handle because it is a plain attribute rather than a buffer.
        """
        super().to(device)
        self.task_weights.data = self.task_weights.data.to(device)
        if self.task_loss_averages is not None:
            self.task_loss_averages = self.task_loss_averages.to(device)
        return self

    def _get_task_names(self) -> List[str]:
        """Get list of all task names from biomarker config.

        Order is binary, then multiclass, then regression — it must match the
        loss order produced by ``_get_task_losses_as_tensors``.
        """
        task_names = []

        # Add binary tasks
        for biomarker in self.biomarker_config.binary_biomarkers:
            task_names.append(f"binary_{biomarker.name}")

        # Add multiclass tasks
        for biomarker in self.biomarker_config.multiclass_biomarkers:
            task_names.append(f"multiclass_{biomarker.name}")

        # Add regression tasks
        for biomarker in self.biomarker_config.continuous_biomarkers:
            task_names.append(f"regression_{biomarker.name}")

        return task_names

    def _get_task_losses_from_components(self, loss_components: Dict[str, float]) -> torch.Tensor:
        """Extract individual task losses from loss components dict.

        Returns a detached float tensor (no gradient path); not used by
        ``forward`` here — presumably kept for external consumers. TODO confirm.
        """
        task_losses = []

        for task_name in self.task_names:
            if task_name in loss_components:
                task_losses.append(loss_components[task_name])
            else:
                # Handle missing task (shouldn't happen, but safety check)
                task_losses.append(0.0)

        return torch.tensor(task_losses, dtype=torch.float32, device=self.task_weights.device)

    def _get_task_losses_as_tensors(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Extract individual task losses as tensors (maintaining gradient connection).

        Re-runs the per-task loss functions of ``base_loss`` over the slices
        given by the tensor layout; result order matches ``task_names``.
        """
        device = predictions.device
        task_losses = []
        tensor_layout = self.biomarker_config.get_tensor_layout()

        # Binary classification losses
        for biomarker in self.biomarker_config.binary_biomarkers:
            layout = tensor_layout[biomarker.name]

            pred_slice = predictions[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]
            target_slice = targets[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]

            loss_fn = self.base_loss.binary_losses[biomarker.name]
            binary_loss = loss_fn(pred_slice, target_slice)
            task_losses.append(binary_loss)

        # Multiclass classification losses
        for biomarker in self.biomarker_config.multiclass_biomarkers:
            layout = tensor_layout[biomarker.name]

            pred_slice = predictions[:, layout.start_idx:layout.end_idx]  # [B, num_classes]
            target_slice = targets[:, layout.start_idx:layout.end_idx]  # [B, num_classes]

            # Convert one-hot targets to class indices
            target_indices = torch.argmax(target_slice, dim=1)  # [B]

            loss_fn = self.base_loss.multiclass_losses[biomarker.name]
            multiclass_loss = loss_fn(pred_slice, target_indices)
            task_losses.append(multiclass_loss)

        # Regression losses
        for biomarker in self.biomarker_config.continuous_biomarkers:
            layout = tensor_layout[biomarker.name]

            pred_slice = predictions[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]
            target_slice = targets[:, layout.start_idx:layout.end_idx].squeeze(-1)  # [B]

            loss_fn = self.base_loss.regression_losses[biomarker.name]
            regression_loss = loss_fn(pred_slice, target_slice)
            task_losses.append(regression_loss)

        return torch.stack(task_losses)

    def _compute_initial_task_losses(self, task_losses: torch.Tensor):
        """Collect initial task losses for computing averages.

        Once every task's window has ``initial_task_loss_average_window``
        samples, freezes the per-task baselines into ``task_loss_averages``
        and flips ``initial_losses_computed``.
        """
        task_losses_cpu = task_losses.detach().cpu().numpy()

        for i, task_name in enumerate(self.task_names):
            self.initial_task_losses[task_name].append(task_losses_cpu[i])

        # Check if we have enough samples to compute averages
        min_samples = min(len(losses) for losses in self.initial_task_losses.values())
        if min_samples >= self.initial_task_loss_average_window:
            self.task_loss_averages = torch.tensor([
                np.mean(self.initial_task_losses[task_name])
                for task_name in self.task_names
            ], dtype=torch.float32, device=self.task_weights.device)

            self.initial_losses_computed = True
            logger.info(
                "GradNorm: Initial task loss averages computed: %s",
                dict(zip(self.task_names, self.task_loss_averages.cpu().numpy())),
            )

    def _update_task_weights(self, model: nn.Module, task_losses: torch.Tensor):
        """Update task weights using simplified GradNorm algorithm.

        The ``model`` argument is accepted for API compatibility but unused:
        this variant balances on loss ratios instead of backbone gradient
        norms. Weights are renormalized via softmax so they stay positive and
        sum to ``num_tasks``.
        """
        if not self.initial_losses_computed:
            return

        device = self.task_weights.device

        # Simplified approach: update weights based on loss ratios without gradient computation
        # This avoids the gradient computation issues while still providing adaptive balancing

        # Compute relative inverse training rates
        task_loss_ratios = task_losses / self.task_loss_averages
        relative_inverse_training_rates = task_loss_ratios / torch.mean(task_loss_ratios)

        # Update weights based on relative training rates
        # Higher loss ratio -> higher weight (more attention to struggling tasks)
        # Apply restoring force based on relative training rates
        weight_updates = (relative_inverse_training_rates ** self.alpha) - 1.0
        self.task_weights.data += self.restoring_force_factor * weight_updates

        # Renormalize weights to prevent them from growing unboundedly
        self.task_weights.data = F.softmax(self.task_weights.data, dim=0) * self.num_tasks

        # Store for monitoring
        self.weight_history.append(self.task_weights.data.detach().cpu().numpy().copy())
        self.loss_ratio_history.append(relative_inverse_training_rates.detach().cpu().numpy().copy())

        if len(self.weight_history) % 50 == 0:  # Print every 50 updates
            logger.info(
                "GradNorm Step %s: Weights = %s",
                self.step_count,
                dict(zip(self.task_names, self.task_weights.data.cpu().numpy())),
            )

    def forward(self, predictions: torch.Tensor, targets: torch.Tensor,
                model: Optional[nn.Module] = None) -> Tuple[torch.Tensor, Dict[str, float]]:
        """
        Forward pass with GradNorm loss balancing.

        During the warm-up phase (before baselines exist) tasks are summed with
        equal weight; afterwards losses are (optionally normalized by their
        baselines and) weighted by the learned task weights.

        Args:
            predictions: Model predictions
            targets: Ground truth targets
            model: The model (needed for gradient computation)

        Returns:
            total_loss: Balanced total loss
            loss_dict: Dictionary with loss components and weights
        """
        # Get individual task losses from base loss
        # (base_total_loss itself is discarded; only its components dict is reused)
        base_total_loss, loss_components = self.base_loss(predictions, targets)

        # Extract task losses as tensors (maintaining gradient connection)
        task_losses = self._get_task_losses_as_tensors(predictions, targets)

        # Collect initial losses if not yet computed
        if not self.initial_losses_computed:
            self._compute_initial_task_losses(task_losses)

        # Update task weights using GradNorm (if model is provided and enough steps have passed)
        if (model is not None and
            self.initial_losses_computed and
            self.step_count % self.update_weights_every == 0 and
            self.step_count > 0):
            self._update_task_weights(model, task_losses)

        # Compute weighted total loss
        if self.initial_losses_computed:
            # Normalize task losses if requested
            if self.normalize_losses:
                normalized_task_losses = task_losses / self.task_loss_averages
                weighted_losses = self.task_weights * normalized_task_losses
            else:
                weighted_losses = self.task_weights * task_losses

            total_loss = torch.sum(weighted_losses)
        else:
            # Use equal weighting during initial phase
            total_loss = torch.sum(task_losses)

        # Update loss components with weights
        loss_components['total_loss'] = total_loss.item()

        # Add weight information to loss components
        if self.initial_losses_computed:
            for i, task_name in enumerate(self.task_names):
                loss_components[f'weight_{task_name}'] = self.task_weights[i].item()

        self.step_count += 1

        return total_loss, loss_components

    def get_task_weights(self) -> Dict[str, float]:
        """Get current task weights as dictionary.

        Returns uniform 1.0 weights while the warm-up baselines are still
        being collected.
        """
        if not self.initial_losses_computed:
            return {task: 1.0 for task in self.task_names}

        return dict(zip(self.task_names, self.task_weights.data.cpu().numpy()))

    def get_weight_history(self) -> List[Dict[str, float]]:
        """Get history of task weights for analysis."""
        history = []
        for weights in self.weight_history:
            history.append(dict(zip(self.task_names, weights)))
        return history

    def reset_weights(self):
        """Reset task weights to uniform distribution and clear all tracking state."""
        with torch.no_grad():
            self.task_weights.data.fill_(1.0)
        self.initial_losses_computed = False
        self.step_count = 0
        self.weight_history.clear()
        self.loss_ratio_history.clear()
        for task_losses in self.initial_task_losses.values():
            task_losses.clear()
301
+
302
+
303
class GradNormTrainer:
    """
    Thin adapter that plugs a GradNorm loss into an existing training loop.
    """

    def __init__(self, gradnorm_loss: GradNormLoss):
        # The balancing loss this wrapper delegates every call to.
        self.gradnorm_loss = gradnorm_loss

    def compute_loss(self, model: nn.Module, predictions: torch.Tensor,
                     targets: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, float]]:
        """
        Compute loss using GradNorm.

        Drop-in replacement for the standard criterion(predictions, targets)
        call; the model handle is forwarded so GradNorm can adapt its weights.
        """
        loss_fn = self.gradnorm_loss
        return loss_fn(predictions, targets, model)

    def get_training_stats(self) -> Dict[str, Any]:
        """Snapshot of GradNorm state, suitable for logging."""
        gn = self.gradnorm_loss
        stats = {
            'task_weights': gn.get_task_weights(),
            'step_count': gn.step_count,
            'initial_losses_computed': gn.initial_losses_computed,
            'num_tasks': gn.num_tasks,
        }
        return stats
code/model/model_factory.py ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Model Factory for public architectures used in this release.
3
+ """
4
+
5
+ from pathlib import Path
6
+
7
+ import timm
8
+ import torch
9
+ import torch.nn as nn
10
+ import torchvision.models as models
11
+ from torchvision.models import (
12
+ DenseNet121_Weights,
13
+ EfficientNet_B0_Weights,
14
+ ResNet18_Weights,
15
+ ResNet34_Weights,
16
+ ResNet50_Weights,
17
+ Swin_B_Weights,
18
+ )
19
+ from config.experiment_config import MODEL_DEFAULTS
20
+
21
+
22
+
23
class MultiTaskHead(nn.Module):
    """Legacy fallback head used when biomarker_config is not provided.

    Projects backbone features through a shared 512-unit bottleneck, then
    emits one concatenated tensor: binary logits, calcium-class logits and
    regression values (7 + 4 + 2 = 13 columns with the defaults).
    """

    def __init__(self, input_dim, num_binary_tasks=7, num_calcium_classes=4, num_regression_tasks=2, dropout=0.1):
        super().__init__()
        # Shared trunk applied before every task-specific projection.
        self.shared_layers = nn.Sequential(
            nn.Linear(input_dim, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.BatchNorm1d(512),
        )
        self.binary_head = nn.Linear(512, num_binary_tasks)
        self.calcium_head = nn.Linear(512, num_calcium_classes)
        self.regression_head = nn.Linear(512, num_regression_tasks)

    def forward(self, x):
        """Return all task outputs concatenated along the feature dimension."""
        features = self.shared_layers(x)
        per_task = [
            head(features)
            for head in (self.binary_head, self.calcium_head, self.regression_head)
        ]
        return torch.cat(per_task, dim=1)
44
+
45
+
46
+ class ModelFactory:
47
+ """Factory class for supported public model architectures."""
48
+
49
+ SUPPORTED_ARCHITECTURES = tuple(MODEL_DEFAULTS.keys())
50
+
51
+ @staticmethod
52
+ def create_model(
53
+ architecture,
54
+ num_classes=13,
55
+ pretrained_weights=None,
56
+ fine_tuning_strategy="full",
57
+ dropout=0.1,
58
+ biomarker_config=None,
59
+ single_target_strategy=None,
60
+ target_feature_dim=None,
61
+ single_target_output_dim=None,
62
+ **kwargs,
63
+ ):
64
+ """Create model based on architecture specification."""
65
+ if single_target_output_dim is not None:
66
+ target_feature_dim = single_target_output_dim
67
+
68
+ if architecture == "ResNet-18":
69
+ return ModelFactory._create_resnet18(
70
+ num_classes,
71
+ pretrained_weights,
72
+ fine_tuning_strategy,
73
+ dropout,
74
+ biomarker_config,
75
+ single_target_strategy,
76
+ target_feature_dim,
77
+ )
78
+ if architecture == "ResNet-34":
79
+ return ModelFactory._create_resnet34(
80
+ num_classes,
81
+ pretrained_weights,
82
+ fine_tuning_strategy,
83
+ dropout,
84
+ biomarker_config,
85
+ single_target_strategy,
86
+ target_feature_dim,
87
+ )
88
+ if architecture == "DenseNet-121":
89
+ return ModelFactory._create_densenet121(
90
+ num_classes,
91
+ pretrained_weights,
92
+ fine_tuning_strategy,
93
+ dropout,
94
+ biomarker_config,
95
+ single_target_strategy,
96
+ target_feature_dim,
97
+ )
98
+ if architecture == "EfficientNet-B0":
99
+ return ModelFactory._create_efficientnet_b0(
100
+ num_classes,
101
+ pretrained_weights,
102
+ fine_tuning_strategy,
103
+ dropout,
104
+ biomarker_config,
105
+ single_target_strategy,
106
+ target_feature_dim,
107
+ )
108
+ if architecture == "ViT-Small (DINOv2)":
109
+ return ModelFactory._create_dinov2_vit(
110
+ architecture,
111
+ num_classes,
112
+ fine_tuning_strategy,
113
+ dropout,
114
+ biomarker_config,
115
+ single_target_strategy,
116
+ target_feature_dim,
117
+ )
118
+ if architecture == "Swin Transformer-Base":
119
+ return ModelFactory._create_swin_base(
120
+ num_classes,
121
+ pretrained_weights,
122
+ fine_tuning_strategy,
123
+ dropout,
124
+ biomarker_config,
125
+ single_target_strategy,
126
+ target_feature_dim,
127
+ )
128
+ if architecture == "ResNet-50 (RadImageNet)":
129
+ return ModelFactory._create_resnet50_radimgnet(
130
+ num_classes,
131
+ fine_tuning_strategy,
132
+ dropout,
133
+ biomarker_config,
134
+ single_target_strategy,
135
+ target_feature_dim,
136
+ )
137
+
138
+ raise ValueError(
139
+ f"Unsupported architecture: {architecture}. "
140
+ f"Supported: {list(ModelFactory.SUPPORTED_ARCHITECTURES)}"
141
+ )
142
+
143
+ @staticmethod
144
+ def _create_multitask_head(feature_dim, dropout, biomarker_config, head_type="flexible", single_target_strategy=None, target_feature_dim=None):
145
+ if biomarker_config is not None:
146
+ if head_type == "linear_probe":
147
+ from .flexible_multitask_head import LinearProbeMultiTaskHead
148
+
149
+ return LinearProbeMultiTaskHead(
150
+ feature_dim,
151
+ biomarker_config,
152
+ dropout=dropout,
153
+ single_target_strategy=single_target_strategy,
154
+ target_feature_dim=target_feature_dim,
155
+ )
156
+ from .flexible_multitask_head import FlexibleMultiTaskHead
157
+
158
+ return FlexibleMultiTaskHead(
159
+ feature_dim,
160
+ biomarker_config,
161
+ dropout=dropout,
162
+ single_target_strategy=single_target_strategy,
163
+ target_feature_dim=target_feature_dim,
164
+ )
165
+ return MultiTaskHead(feature_dim, dropout=dropout)
166
+
167
+ @staticmethod
168
+ def _freeze_for_linear_probe(model, head_module):
169
+ for param in model.parameters():
170
+ param.requires_grad = False
171
+ for param in head_module.parameters():
172
+ param.requires_grad = True
173
+
174
+ @staticmethod
175
+ def _create_resnet18(num_classes, pretrained_weights, fine_tuning_strategy, dropout, biomarker_config, single_target_strategy=None, target_feature_dim=None):
176
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
177
+ if pretrained_weights == "ImageNet":
178
+ model = models.resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
179
+ else:
180
+ model = models.resnet18(weights=None)
181
+ model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
182
+
183
+ feature_dim = model.fc.in_features
184
+ model.fc = ModelFactory._create_multitask_head(
185
+ feature_dim, dropout, biomarker_config, head_type, single_target_strategy, target_feature_dim
186
+ )
187
+ if fine_tuning_strategy == "linear_probe":
188
+ ModelFactory._freeze_for_linear_probe(model, model.fc)
189
+ return model
190
+
191
+ @staticmethod
192
+ def _create_resnet34(num_classes, pretrained_weights, fine_tuning_strategy, dropout, biomarker_config, single_target_strategy=None, target_feature_dim=None):
193
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
194
+ if pretrained_weights == "ImageNet":
195
+ model = models.resnet34(weights=ResNet34_Weights.IMAGENET1K_V1)
196
+ else:
197
+ model = models.resnet34(weights=None)
198
+ model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
199
+
200
+ feature_dim = model.fc.in_features
201
+ model.fc = ModelFactory._create_multitask_head(
202
+ feature_dim, dropout, biomarker_config, head_type, single_target_strategy, target_feature_dim
203
+ )
204
+ if fine_tuning_strategy == "linear_probe":
205
+ ModelFactory._freeze_for_linear_probe(model, model.fc)
206
+ return model
207
+
208
+ @staticmethod
209
+ def _create_densenet121(num_classes, pretrained_weights, fine_tuning_strategy, dropout, biomarker_config=None, single_target_strategy=None, target_feature_dim=None):
210
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
211
+ if pretrained_weights == "ImageNet":
212
+ model = models.densenet121(weights=DenseNet121_Weights.IMAGENET1K_V1)
213
+ else:
214
+ model = models.densenet121(weights=None)
215
+ feature_dim = model.classifier.in_features
216
+ model.classifier = ModelFactory._create_multitask_head(
217
+ feature_dim,
218
+ dropout,
219
+ biomarker_config,
220
+ head_type=head_type,
221
+ single_target_strategy=single_target_strategy,
222
+ target_feature_dim=target_feature_dim,
223
+ )
224
+ if fine_tuning_strategy == "linear_probe":
225
+ ModelFactory._freeze_for_linear_probe(model, model.classifier)
226
+ return model
227
+
228
+ @staticmethod
229
+ def _create_efficientnet_b0(num_classes, pretrained_weights, fine_tuning_strategy, dropout, biomarker_config=None, single_target_strategy=None, target_feature_dim=None):
230
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
231
+ if pretrained_weights == "ImageNet":
232
+ model = models.efficientnet_b0(weights=EfficientNet_B0_Weights.IMAGENET1K_V1)
233
+ else:
234
+ model = models.efficientnet_b0(weights=None)
235
+ feature_dim = model.classifier[1].in_features
236
+ model.classifier = ModelFactory._create_multitask_head(
237
+ feature_dim,
238
+ dropout,
239
+ biomarker_config,
240
+ head_type=head_type,
241
+ single_target_strategy=single_target_strategy,
242
+ target_feature_dim=target_feature_dim,
243
+ )
244
+ if fine_tuning_strategy == "linear_probe":
245
+ ModelFactory._freeze_for_linear_probe(model, model.classifier)
246
+ return model
247
+
248
+ @staticmethod
249
+ def _create_dinov2_vit(architecture, num_classes, fine_tuning_strategy, dropout, biomarker_config, single_target_strategy=None, target_feature_dim=None):
250
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
251
+ model = timm.create_model("vit_small_patch14_dinov2", pretrained=True, num_classes=0, img_size=256)
252
+ feature_dim = model.num_features
253
+ model.head = ModelFactory._create_multitask_head(
254
+ feature_dim,
255
+ dropout,
256
+ biomarker_config,
257
+ head_type=head_type,
258
+ single_target_strategy=single_target_strategy,
259
+ target_feature_dim=target_feature_dim,
260
+ )
261
+ if fine_tuning_strategy == "linear_probe":
262
+ ModelFactory._freeze_for_linear_probe(model, model.head)
263
+ return model
264
+
265
+ @staticmethod
266
+ def _create_swin_base(num_classes, pretrained_weights, fine_tuning_strategy, dropout, biomarker_config=None, single_target_strategy=None, target_feature_dim=None):
267
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
268
+ if pretrained_weights == "ImageNet-22K":
269
+ model = models.swin_b(weights=Swin_B_Weights.IMAGENET1K_V1)
270
+ else:
271
+ model = models.swin_b(weights=None)
272
+ model.features[0][0] = nn.Conv2d(1, 128, kernel_size=4, stride=4)
273
+ feature_dim = model.head.in_features
274
+ model.head = ModelFactory._create_multitask_head(
275
+ feature_dim,
276
+ dropout,
277
+ biomarker_config,
278
+ head_type=head_type,
279
+ single_target_strategy=single_target_strategy,
280
+ target_feature_dim=target_feature_dim,
281
+ )
282
+ if fine_tuning_strategy == "linear_probe":
283
+ ModelFactory._freeze_for_linear_probe(model, model.head)
284
+ return model
285
+
286
+ @staticmethod
287
+ def _create_resnet50_radimgnet(num_classes, fine_tuning_strategy, dropout, biomarker_config, single_target_strategy=None, target_feature_dim=None):
288
+ head_type = "linear_probe" if fine_tuning_strategy == "linear_probe" else "flexible"
289
+ ckpt_path = (
290
+ Path(__file__).resolve().parents[1]
291
+ / "radimagenet_ckpt"
292
+ / "resnet50"
293
+ / "ResNet50_RadImageNet.pt"
294
+ )
295
+ try:
296
+ if ckpt_path.exists():
297
+ model = models.resnet50(weights=None)
298
+ checkpoint = torch.load(str(ckpt_path), map_location="cpu")
299
+ state_dict = checkpoint.get("model", checkpoint.get("state_dict", checkpoint))
300
+ model_state_dict = model.state_dict()
301
+ filtered_state_dict = {}
302
+ for key, value in state_dict.items():
303
+ if key.startswith("fc.") or key.startswith("classifier."):
304
+ continue
305
+ mapped_key = key
306
+ if key.startswith("backbone."):
307
+ mapped_key = key[9:]
308
+ if mapped_key.startswith("4."):
309
+ mapped_key = "layer1." + mapped_key[2:]
310
+ elif mapped_key.startswith("5."):
311
+ mapped_key = "layer2." + mapped_key[2:]
312
+ elif mapped_key.startswith("6."):
313
+ mapped_key = "layer3." + mapped_key[2:]
314
+ elif mapped_key.startswith("7."):
315
+ mapped_key = "layer4." + mapped_key[2:]
316
+ elif mapped_key.startswith("0."):
317
+ mapped_key = "conv1." + mapped_key[2:]
318
+ elif mapped_key.startswith("1."):
319
+ mapped_key = "bn1." + mapped_key[2:]
320
+ elif key.startswith("features."):
321
+ mapped_key = key[9:]
322
+ if mapped_key in model_state_dict:
323
+ filtered_state_dict[mapped_key] = value
324
+ model.load_state_dict(filtered_state_dict, strict=False)
325
+ else:
326
+ model = models.resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
327
+ except Exception:
328
+ model = models.resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
329
+
330
+ feature_dim = model.fc.in_features
331
+ model.fc = ModelFactory._create_multitask_head(
332
+ feature_dim,
333
+ dropout,
334
+ biomarker_config,
335
+ head_type=head_type,
336
+ single_target_strategy=single_target_strategy,
337
+ target_feature_dim=target_feature_dim,
338
+ )
339
+ if fine_tuning_strategy == "linear_probe":
340
+ ModelFactory._freeze_for_linear_probe(model, model.fc)
341
+ return model
342
+
343
+
344
# Expected GPU memory footprint per supported architecture; module-level so
# the table is built once rather than on every call.
_MEMORY_REQUIREMENTS = {
    "ResNet-18": "4-6GB",
    "ResNet-34": "6-8GB",
    "DenseNet-121": "8-10GB",
    "EfficientNet-B0": "6-8GB",
    "ViT-Small (DINOv2)": "8-10GB",
    "Swin Transformer-Base": "12-16GB",
    "ResNet-50 (RadImageNet)": "6-8GB",
}


def get_model_memory_requirement(architecture):
    """Get expected GPU memory requirement for supported public architectures.

    Returns the string "Unknown" for unrecognized architecture names.
    """
    return _MEMORY_REQUIREMENTS.get(architecture, "Unknown")
code/model/single_target_strategies.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Single Target Strategy Implementations
3
+ Handles different feature extraction strategies for single-target classification
4
+ """
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from enum import Enum
10
+ from typing import Union, Tuple, Optional
11
+
12
+
13
class SingleTargetStrategy(Enum):
    """Enumeration of single-target classification strategies.

    The member values are the exact human-readable strings used in config
    files and checkpoints (see STRATEGY_MAPPING below), so they must not be
    changed without a migration.
    """
    DIRECT_CLASSIFICATION_HEAD = "Direct classification head"
    CLS_TOKEN_CLASSIFICATION = "CLS token classification"
    GLOBAL_AVERAGE_POOLING = "Global average pooling"
18
+
19
+
20
class FeatureExtractor(nn.Module):
    """Abstract base for strategy-specific feature extractors.

    Subclasses implement :meth:`forward` and are expected to expose an
    ``output_dim`` attribute describing the feature width they produce.
    """

    def __init__(self, strategy: SingleTargetStrategy):
        super().__init__()
        # Record which strategy this extractor implements, for introspection.
        self.strategy = strategy

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Extract features from *x*; must be overridden by subclasses."""
        raise NotImplementedError
30
+
31
+
32
class DirectClassificationHeadExtractor(FeatureExtractor):
    """
    Direct classification head strategy for CNN-based models.

    Accepts either spatial features [B, C, H, W] (global-average-pooled, then
    projected) or already-flattened features [B, D] (projected through a
    lazily created processor whose input width is taken from the first batch).

    NOTE(review): the lazily created ``flattened_processor`` is registered on
    first forward, so its parameters are absent from optimizers built before
    that point and from state_dicts saved before any 2D input was seen.
    test.py's checkpoint loader relies on the exact attribute name
    ``flattened_processor`` — do not rename it.
    """

    def __init__(self, input_dim: int, feature_dim: int = 512, dropout: float = 0.1):
        super().__init__(SingleTargetStrategy.DIRECT_CLASSIFICATION_HEAD)

        # Global average pooling for spatial features
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

        # Feature processing layers for the spatial (4D) path
        self.feature_processor = nn.Sequential(
            nn.Flatten(),
            nn.Linear(input_dim, feature_dim),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.BatchNorm1d(feature_dim)
        )

        # Width of the features produced by forward()
        self.output_dim = feature_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Extract features using direct classification head approach.

        Args:
            x: Input features [B, C, H, W] (spatial features from CNN) or
               [B, input_dim] (flattened features)

        Returns:
            Processed features [B, feature_dim]

        Raises:
            ValueError: if *x* is neither 2D nor 4D.
        """
        # Check if input is spatial (4D) or flattened (2D)
        if x.dim() == 4:
            # Spatial features: apply global average pooling first
            pooled = self.global_pool(x)  # [B, C, 1, 1]
            features = self.feature_processor(pooled)  # [B, feature_dim]
        elif x.dim() == 2:
            # Flattened features: the constructor's input_dim was sized for the
            # spatial path, so a separate processor is created lazily here with
            # its input width taken from the actual batch.
            if not hasattr(self, 'flattened_processor'):
                self.flattened_processor = nn.Sequential(
                    nn.Linear(x.size(1), self.output_dim),  # x.size(1) is the actual flattened dimension
                    nn.ReLU(inplace=True),
                    nn.Dropout(self.feature_processor[3].p),  # reuse the configured dropout rate
                    nn.LayerNorm(self.output_dim)  # LayerNorm avoids BatchNorm's single-sample failure
                )
                # Attribute assignment registers the module, but it still needs
                # to be moved to the input's device explicitly.
                self.flattened_processor = self.flattened_processor.to(x.device)
            features = self.flattened_processor(x)  # [B, feature_dim]
        else:
            raise ValueError(f"Expected 2D or 4D input tensor, got {x.dim()}D tensor with shape {x.shape}")

        return features
90
+
91
+
92
class CLSTokenClassificationExtractor(FeatureExtractor):
    """CLS-token strategy for Transformer-based backbones.

    Takes the first token of a ``[B, seq_len, D]`` sequence (or an already
    pooled ``[B, D]`` tensor) and refines it with a small MLP block.
    """

    def __init__(self, feature_dim: int = 768, dropout: float = 0.1):
        super().__init__(SingleTargetStrategy.CLS_TOKEN_CLASSIFICATION)
        # Dimension-preserving refinement of the CLS token.
        self.feature_processor = nn.Sequential(
            nn.Linear(feature_dim, feature_dim),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.LayerNorm(feature_dim),
        )
        self.output_dim = feature_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return processed CLS-token features of shape ``[B, feature_dim]``.

        Raises:
            ValueError: if *x* is neither ``[B, seq_len, D]`` nor ``[B, D]``.
        """
        if x.dim() == 2:
            cls_token = x  # CLS token already extracted upstream
        elif x.dim() == 3:
            cls_token = x[:, 0, :]  # first token of the sequence is CLS
        else:
            raise ValueError(f"Unexpected input shape: {x.shape}")
        return self.feature_processor(cls_token)
134
+
135
+
136
class GlobalAveragePoolingExtractor(FeatureExtractor):
    """Global-average-pooling strategy for spatial ``[B, C, H, W]`` features.

    Used for VAE encoders and other backbones that output spatial maps: the
    map is pooled to ``[B, C, 1, 1]``, flattened, and projected to
    ``feature_dim``.
    """

    def __init__(self, input_dim: int, feature_dim: int = 512, dropout: float = 0.1):
        super().__init__(SingleTargetStrategy.GLOBAL_AVERAGE_POOLING)
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.feature_processor = nn.Sequential(
            nn.Flatten(),
            nn.Linear(input_dim, feature_dim),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.BatchNorm1d(feature_dim),
        )
        self.output_dim = feature_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Pool spatial features ``[B, C, H, W]`` down to ``[B, feature_dim]``."""
        return self.feature_processor(self.global_pool(x))
176
+
177
+
178
def create_feature_extractor(
    strategy: Union[str, SingleTargetStrategy],
    input_dim: int,
    feature_dim: int = 512,
    dropout: float = 0.1
) -> FeatureExtractor:
    """Instantiate the :class:`FeatureExtractor` matching *strategy*.

    Args:
        strategy: Single-target strategy, as a string (enum value) or enum.
        input_dim: Input feature dimension (ignored by the CLS-token strategy).
        feature_dim: Output feature dimension.
        dropout: Dropout rate.

    Returns:
        The appropriate ``FeatureExtractor`` instance.

    Raises:
        ValueError: for an unrecognized strategy.
    """
    if isinstance(strategy, str):
        strategy = SingleTargetStrategy(strategy)

    if strategy is SingleTargetStrategy.DIRECT_CLASSIFICATION_HEAD:
        return DirectClassificationHeadExtractor(input_dim, feature_dim, dropout)
    if strategy is SingleTargetStrategy.CLS_TOKEN_CLASSIFICATION:
        # CLS extractor operates directly on feature_dim; input_dim is unused.
        return CLSTokenClassificationExtractor(feature_dim, dropout)
    if strategy is SingleTargetStrategy.GLOBAL_AVERAGE_POOLING:
        return GlobalAveragePoolingExtractor(input_dim, feature_dim, dropout)
    raise ValueError(f"Unknown single-target strategy: {strategy}")
211
+
212
+
213
def extract_features_from_model_output(
    model_output: torch.Tensor,
    strategy: Union[str, SingleTargetStrategy],
    input_dim: Optional[int] = None,
    feature_dim: int = 512,
    dropout: float = 0.1
) -> torch.Tensor:
    """One-shot feature extraction from a backbone's raw output.

    Delegates construction to :func:`create_feature_extractor` instead of
    duplicating its dispatch logic (the previous implementation repeated the
    per-strategy branching here).

    WARNING: a brand-new, randomly initialized extractor is created on every
    call, so the returned features carry no learned weights. For trained
    models, build one extractor via :func:`create_feature_extractor` and
    reuse it.

    Args:
        model_output: Raw output from the backbone model.
        strategy: Single-target strategy to use (string or enum).
        input_dim: Input dimension (required for the spatial strategies).
        feature_dim: Output feature dimension.
        dropout: Dropout rate.

    Returns:
        Extracted features [B, feature_dim].

    Raises:
        ValueError: for an unknown strategy, or when ``input_dim`` is missing
            for a strategy that needs it.
    """
    if isinstance(strategy, str):
        strategy = SingleTargetStrategy(strategy)

    # Both spatial strategies size their first Linear from input_dim.
    needs_input_dim = (
        SingleTargetStrategy.DIRECT_CLASSIFICATION_HEAD,
        SingleTargetStrategy.GLOBAL_AVERAGE_POOLING,
    )
    if strategy in needs_input_dim and input_dim is None:
        raise ValueError(f"input_dim required for {strategy.name} strategy")

    extractor = create_feature_extractor(strategy, input_dim, feature_dim, dropout)
    return extractor(model_output)
255
+
256
+
257
# Strategy mapping from config string values to enum values.
# Derived from the enum itself (each member's value IS the config string),
# so the mapping can never drift out of sync with SingleTargetStrategy.
STRATEGY_MAPPING = {strategy.value: strategy for strategy in SingleTargetStrategy}
263
+
264
+
265
def get_strategy_from_name(strategy_name: str) -> SingleTargetStrategy:
    """
    Convert strategy string value to SingleTargetStrategy enum.

    Args:
        strategy_name: Strategy string from config/checkpoint.

    Returns:
        SingleTargetStrategy enum value.

    Raises:
        ValueError: if *strategy_name* is not a known strategy string.
    """
    try:
        return STRATEGY_MAPPING[strategy_name]
    except KeyError:
        raise ValueError(
            f"Unknown strategy: {strategy_name}. Available: {list(STRATEGY_MAPPING.keys())}"
        ) from None
281
+
code/requirements.txt ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Requirements for AbdCTBench training and testing
2
+ # Core ML libraries
3
+ torch>=1.12.0
4
+ torchvision>=0.13.0
5
+ timm>=0.9.0
6
+ transformers>=4.20.0
7
+ diffusers>=0.35.0
8
+
9
+ # Data handling
10
+ pandas>=1.5.0
11
+ numpy>=1.21.0
12
+ scikit-learn>=1.1.0
13
+ Pillow>=9.0.0
14
+
15
+ # Logging and visualization
16
+ tensorboard>=2.10.0
17
+ tqdm>=4.64.0
18
+ safetensors>=0.4.0
19
+
20
+ # Medical imaging (optional, for future enhancements)
21
+ # nibabel>=4.0.0
22
+ # pydicom>=2.3.0
23
+
24
+ # Development tools
25
+ pytest>=7.0.0
26
+ black>=22.0.0
27
+ flake8>=5.0.0
28
+
29
+ # Additional utilities
30
+ pyyaml>=6.0
31
+ matplotlib>=3.5.0
32
+ seaborn>=0.11.0
code/test.py ADDED
@@ -0,0 +1,939 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Flexible Multi-Task Testing Script
3
+ Supports any biomarker configuration and model architecture
4
+ """
5
+
6
+ import os
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from torch.utils.data import DataLoader
10
+ from torchvision import transforms
11
+ from argparse import ArgumentParser
12
+ from tqdm import tqdm
13
+ import numpy as np
14
+ import json
15
+ from typing import Dict, Any, List, Tuple
16
+
17
+ from dataset import ClassifierDataset, PredictionDataset
18
+ from model.model_factory import ModelFactory
19
+ from model.flexible_multitask_head import FlexibleMetricsCalculator
20
+ from config.biomarker_config import FlexibleBiomarkerConfig
21
+ from config.experiment_config import ExperimentConfig, DEFAULT_AUGMENTATIONS
22
+ from sklearn.metrics import roc_auc_score, average_precision_score, mean_absolute_error, mean_squared_error, r2_score
23
+ from sklearn.exceptions import UndefinedMetricWarning
24
+ import warnings
25
+ warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
26
+
27
+ try:
28
+ from safetensors.torch import load_file as safetensors_load_file
29
+ except ImportError:
30
+ safetensors_load_file = None
31
+
32
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
33
+
34
def arg_parse():
    """Build and parse the CLI arguments for flexible multi-task testing.

    Returns:
        argparse.Namespace with data/checkpoint paths, inference options,
        and output/threshold-selection flags.
    """
    parser = ArgumentParser(description='Flexible Multi-Task Testing')
    parser.add_argument('--data_dir', required=True, help='Directory with test data')
    parser.add_argument(
        '--checkpoint_path',
        required=True,
        help='Path to model checkpoint (.pth/.pt or .safetensors).'
    )
    parser.add_argument('--biomarker_config', required=True, help='Path to biomarker configuration file (YAML or JSON)')
    parser.add_argument('--output_dir', default='test_results', help='Output directory for results')
    parser.add_argument('--size', default=256, type=int, help='Image size')
    parser.add_argument('--only_pred', action='store_true', help='Only generate predictions (no ground truth evaluation)')
    parser.add_argument('--batch_size', default=16, type=int, help='Batch size for inference')
    parser.add_argument('--save_predictions', action='store_true', help='Save individual predictions to CSV')
    parser.add_argument('--save_metrics', action='store_true', help='Save detailed metrics to JSON file')
    parser.add_argument('--use_val_for_thresholds', action='store_true',
                        help='Use validation set for threshold optimization (default: use same data_dir)')
    parser.add_argument('--val_data_dir', help='Path to validation data directory (if different from data_dir)')
    parser.add_argument('--test_csv', default='test.csv', help='CSV file to use for testing (default: test.csv)')
    parser.add_argument(
        '--legacy_checkpoint_compat',
        action='store_true',
        help='Enable compatibility loading for older checkpoint key layouts.'
    )
    return parser.parse_args()
59
+
60
def load_checkpoint(checkpoint_path: str, legacy_compat: bool = False) -> Dict[str, Any]:
    """Load checkpoint in current format, optionally with legacy compatibility.

    For ``.safetensors`` files the weights hold only tensors, so the
    experiment config and optimal thresholds are read from sidecar JSON files
    (``config.json`` / ``optimal_thresholds.json``) in the same directory.
    Any other extension is loaded with ``torch.load``.

    Args:
        checkpoint_path: Path to a .pth/.pt or .safetensors checkpoint.
        legacy_compat: If True, accept older checkpoints that store weights
            under "state_dict" instead of "model_state_dict".

    Returns:
        Dict with at least "model_state_dict" and "config" keys.

    Raises:
        FileNotFoundError: if *checkpoint_path* does not exist.
        ImportError: for .safetensors without the safetensors package installed.
        ValueError: if required keys are missing after (optional) remapping.
    """
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(f"Checkpoint not found: {checkpoint_path}")

    print(f"Loading checkpoint from: {checkpoint_path}")
    checkpoint_ext = os.path.splitext(checkpoint_path)[1].lower()

    if checkpoint_ext == ".safetensors":
        # safetensors_load_file is None when the optional import at module
        # top failed.
        if safetensors_load_file is None:
            raise ImportError(
                "safetensors is required to load .safetensors checkpoints. "
                "Install with: pip install safetensors"
            )

        model_state_dict = safetensors_load_file(checkpoint_path, device="cpu")
        checkpoint_dir = os.path.dirname(checkpoint_path)
        config_path = os.path.join(checkpoint_dir, "config.json")
        thresholds_path = os.path.join(checkpoint_dir, "optimal_thresholds.json")

        config = {}
        if os.path.exists(config_path):
            with open(config_path, "r") as f:
                config = json.load(f)
        else:
            # An empty config still passes validation below; downstream code
            # will then fall back to defaults for every field.
            print(f"Warning: no config.json found next to safetensors file: {config_path}")

        optimal_thresholds = {}
        if os.path.exists(thresholds_path):
            with open(thresholds_path, "r") as f:
                optimal_thresholds = json.load(f)

        # Normalize to the same dict layout torch.load checkpoints use.
        checkpoint = {
            "model_state_dict": model_state_dict,
            "config": config,
            "optimal_thresholds": optimal_thresholds,
        }
    else:
        # weights_only=False: checkpoint may contain pickled config objects.
        checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)

        # Legacy layout stored weights under "state_dict"; rebuild the
        # current layout from whatever metadata is present.
        if legacy_compat and "model_state_dict" not in checkpoint and "state_dict" in checkpoint:
            checkpoint = {
                "model_state_dict": checkpoint["state_dict"],
                "config": checkpoint.get("config", {}),
                "epoch": checkpoint.get("epoch", 0),
                "val_metrics": checkpoint.get("val_metrics", {}),
            }

    required_keys = ["model_state_dict", "config"]
    missing = [k for k in required_keys if k not in checkpoint]
    if missing:
        raise ValueError(
            f"Checkpoint is missing required keys: {missing}. "
            "Please use a checkpoint produced by the current train.py pipeline."
        )

    return checkpoint
117
+
118
+
119
+ def _remap_legacy_state_dict_keys(state_dict: Dict[str, Any]) -> Dict[str, Any]:
120
+ """Apply lightweight key remapping for common legacy checkpoint layouts."""
121
+ remapped = {}
122
+ for key, value in state_dict.items():
123
+ new_key = key
124
+ if new_key.startswith("module."):
125
+ new_key = new_key[len("module."):]
126
+ if new_key.startswith("resnet34.fc."):
127
+ new_key = "fc." + new_key[len("resnet34.fc."):]
128
+ elif new_key.startswith("resnet18.fc."):
129
+ new_key = "fc." + new_key[len("resnet18.fc."):]
130
+ elif new_key.startswith("resnet50.fc."):
131
+ new_key = "fc." + new_key[len("resnet50.fc."):]
132
+ remapped[new_key] = value
133
+ return remapped
134
+
135
+
136
+ def _materialize_lazy_modules_from_state_dict(
137
+ model: torch.nn.Module,
138
+ state_dict: Dict[str, Any],
139
+ dropout: float,
140
+ ) -> None:
141
+ """
142
+ Materialize lazily-created modules (e.g., flattened_processor) before load_state_dict.
143
+ """
144
+ weight_key = "classifier.feature_extractor.flattened_processor.0.weight"
145
+ if (
146
+ weight_key in state_dict
147
+ and hasattr(model, "classifier")
148
+ and hasattr(model.classifier, "feature_extractor")
149
+ and not hasattr(model.classifier.feature_extractor, "flattened_processor")
150
+ ):
151
+ linear_weight = state_dict[weight_key]
152
+ out_dim, in_dim = linear_weight.shape
153
+ model.classifier.feature_extractor.flattened_processor = torch.nn.Sequential(
154
+ torch.nn.Linear(in_dim, out_dim),
155
+ torch.nn.ReLU(inplace=True),
156
+ torch.nn.Dropout(dropout),
157
+ torch.nn.LayerNorm(out_dim),
158
+ )
159
+
160
+
161
def create_model_from_checkpoint(
    checkpoint: Dict[str, Any],
    biomarker_config: FlexibleBiomarkerConfig,
    legacy_compat: bool = False
) -> Tuple[torch.nn.Module, ExperimentConfig]:
    """Create model + config from checkpoint.

    Rebuilds the ExperimentConfig from the checkpoint's stored config dict
    (with defaults for any missing field), constructs the matching model via
    ModelFactory, and loads the saved weights non-strictly.

    Args:
        checkpoint: Dict containing "model_state_dict" and "config"
            (as produced by load_checkpoint).
        biomarker_config: Biomarker configuration defining head outputs.
        legacy_compat: If True, remap older state-dict key layouts first.

    Returns:
        (model on `device` in eval mode, reconstructed ExperimentConfig).
    """
    config_dict = checkpoint["config"]

    # Create experiment config with all required parameters
    # (every field falls back to a default so partial configs still load).
    config = ExperimentConfig(
        model=config_dict.get('model', 'ResNet-18'),
        loss_function=config_dict.get('loss_function', 'CE'),
        must_include=config_dict.get('must_include', True),
        learning_rate=config_dict.get('learning_rate', 1e-3),
        batch_size=config_dict.get('batch_size', 16),
        weight_decay=config_dict.get('weight_decay', 1e-5),
        optimizer=config_dict.get('optimizer', 'AdamW'),
        scheduler=config_dict.get('scheduler', 'CosineAnnealing'),
        image_augmentations=config_dict.get('image_augmentations', DEFAULT_AUGMENTATIONS.copy()),
        dropout=config_dict.get('dropout', 0.1),
        loss_specific_params=config_dict.get('loss_specific_params', 'class_weights=inverse_frequency'),
        multi_target_strategy=config_dict.get('multi_target_strategy', 'Shared backbone + task-specific heads'),
        single_target_strategy=config_dict.get('single_target_strategy', ''),
        pretrained_weights=config_dict.get('pretrained_weights', 'ImageNet'),
        fine_tuning_strategy=config_dict.get('fine_tuning_strategy', 'full'),
        expected_gpu_memory=config_dict.get('expected_gpu_memory', '8-10GB'),
        architectural_family=config_dict.get('architectural_family', 'CNN'),
        class_weighting=config_dict.get('class_weighting', 'inverse_frequency'),
        sampling_strategy=config_dict.get('sampling_strategy', 'balanced_batch'),
        threshold_selection=config_dict.get('threshold_selection', 'F1_optimal')
    )
    single_target_strategy = config_dict.get('single_target_strategy', '')

    print(f"Creating model: {config.model}")
    print(f"Fine-tuning strategy: {config.fine_tuning_strategy}")
    if single_target_strategy:
        print(f"Single-target strategy: {single_target_strategy}")

    # Align optional target feature dimension with saved task head input if present.
    # The first task-head Linear weight is [out, in]; shape[1] gives the
    # feature width the saved head expects.
    expected_head_dim = None
    for key, tensor in checkpoint['model_state_dict'].items():
        if '.task_heads.' in key and key.endswith('.weight'):
            expected_head_dim = tensor.shape[1]
            break

    # Create model using ModelFactory
    model = ModelFactory.create_model(
        architecture=config.model,
        num_classes=biomarker_config.total_output_size,
        pretrained_weights=config.pretrained_weights,
        fine_tuning_strategy=config.fine_tuning_strategy,
        dropout=config.dropout,
        biomarker_config=biomarker_config,
        single_target_strategy=single_target_strategy,
        single_target_output_dim=expected_head_dim
    )

    state_dict_to_load = checkpoint['model_state_dict']
    if legacy_compat:
        state_dict_to_load = _remap_legacy_state_dict_keys(state_dict_to_load)

    # Lazily-created submodules must exist before load_state_dict can fill them.
    _materialize_lazy_modules_from_state_dict(
        model=model,
        state_dict=state_dict_to_load,
        dropout=config.dropout,
    )

    # strict=False: tolerate head/backbone key differences and report them.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_to_load, strict=False)
    if missing_keys or unexpected_keys:
        print("State dict loading warnings:")
        if missing_keys:
            print(f"  Missing keys: {missing_keys[:5]}{'...' if len(missing_keys) > 5 else ''}")
        if unexpected_keys:
            print(f"  Unexpected keys: {unexpected_keys[:5]}{'...' if len(unexpected_keys) > 5 else ''}")
        print("Model loaded successfully despite key mismatches")
    else:
        print("Model state dict loaded perfectly!")

    model.to(device)
    model.eval()

    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Model loaded successfully!")
    print(f"Total parameters: {total_params:,}")
    print(f"Trainable parameters: {trainable_params:,}")

    return model, config
249
+
250
def create_test_transforms(config: ExperimentConfig) -> transforms.Compose:
    """Create test transforms that match training preprocessing exactly.

    NOTE(review): no Resize is applied here — presumably the dataset resizes
    via its ``size`` argument; verify against ClassifierDataset/PredictionDataset.
    """

    # CRITICAL: Parse augmentation string to get the EXACT same settings as training
    from config.experiment_config import parse_augmentation_string
    aug_params = parse_augmentation_string(config.image_augmentations)

    print(f"Test preprocessing settings:")
    print(f"  Pretrained weights: {config.pretrained_weights}")
    print(f"  ImageNet normalization: {aug_params['imagenet_norm']}")
    print(f"  Image augmentations: {config.image_augmentations}")

    transform_list = [transforms.ToTensor()]

    # CRITICAL: Convert grayscale to 3-channel for pre-trained models (matches train.py)
    # NOTE(review): repeat(3, 1, 1) assumes the incoming tensor is 1-channel;
    # a 3-channel input would become 9 channels — confirm the dataset always
    # yields grayscale images.
    transform_list.append(transforms.Lambda(lambda x: x.repeat(3, 1, 1)))

    # CRITICAL: Use the EXACT same normalization logic as train.py
    # Only apply normalization if aug_params['imagenet_norm'] is True
    if aug_params['imagenet_norm']:
        if config.pretrained_weights == "ImageNet":
            # Use ImageNet normalization for ImageNet pre-trained models
            transform_list.append(transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]
            ))
        elif config.pretrained_weights == "RadImageNet":
            # Use RadImageNet normalization (medical imaging specific)
            # NOTE(review): ImageNet stats are used as a stand-in here;
            # confirm against the actual RadImageNet preprocessing.
            transform_list.append(transforms.Normalize(
                mean=[0.485, 0.456, 0.406],  # Using ImageNet stats as fallback
                std=[0.229, 0.224, 0.225]  # RadImageNet likely uses similar normalization
            ))
        else:
            # Use CT-specific normalization for non-pretrained models
            transform_list.append(transforms.Normalize(
                mean=[0.55001191, 0.55001191, 0.55001191],
                std=[0.18854326, 0.18854326, 0.18854326]
            ))
        print(f"Normalization applied: {config.pretrained_weights} normalization")
    else:
        print(f"No normalization applied (imagenet_norm=False)")

    return transforms.Compose(transform_list)
293
+
294
def create_test_dataset(data_dir: str, biomarker_config: FlexibleBiomarkerConfig,
                        config: ExperimentConfig, size: int = 256, only_pred: bool = False,
                        test_csv: str = 'test.csv', batch_size: int = 16) -> DataLoader:
    """Build the test DataLoader with training-matched preprocessing.

    When ``only_pred`` is true an unlabeled :class:`PredictionDataset` is
    used; otherwise the labeled :class:`ClassifierDataset` is read from
    ``test_csv``. Returns a non-shuffling DataLoader.
    """
    transform = create_test_transforms(config)

    if only_pred:
        # Unlabeled images — inference output only, no metrics possible.
        dataset = PredictionDataset(data_dir, transforms=transform, size=size)
        print(f"Created test dataset with {len(dataset)} images (prediction only)")
    else:
        dataset = ClassifierDataset(
            data_dir,
            biomarker_config,
            transforms=transform,
            size=size,
            train=False,
            csv_file=test_csv,
        )
        print(f"Created test dataset with {len(dataset)} samples")

    # Deterministic order so predictions line up with study IDs.
    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )
325
+
326
def process_predictions(predictions: torch.Tensor, biomarker_config: FlexibleBiomarkerConfig) -> Dict[str, Any]:
    """Convert raw model outputs into per-biomarker interpretable values.

    Binary heads become sigmoid probabilities, multiclass heads become
    softmax distributions plus an argmax class index, and continuous heads
    are denormalized back to physical units via each biomarker's
    ``denormalize``.
    """
    layout_map = biomarker_config.get_tensor_layout()
    results = {}

    def head_columns(name):
        # Slice out the output columns owned by one biomarker head.
        span = layout_map[name]
        return predictions[:, span.start_idx:span.end_idx]

    # Binary heads: a single logit each -> probability of the positive class.
    for biomarker in biomarker_config.binary_biomarkers:
        probs = torch.sigmoid(head_columns(biomarker.name)).cpu().numpy().flatten()
        results[biomarker.name] = probs

    # Multiclass heads: full softmax distribution plus predicted class index.
    for biomarker in biomarker_config.multiclass_biomarkers:
        dist = F.softmax(head_columns(biomarker.name), dim=1).cpu().numpy()
        results[f"{biomarker.name}_probabilities"] = dist
        results[f"{biomarker.name}_predicted_class"] = np.argmax(dist, axis=1)

    # Continuous heads: raw regression output mapped back to physical units.
    for biomarker in biomarker_config.continuous_biomarkers:
        raw = head_columns(biomarker.name).cpu().numpy().flatten()
        results[biomarker.name] = np.array([biomarker.denormalize(v) for v in raw])

    return results
369
+
370
def find_optimal_thresholds_on_validation(model: torch.nn.Module, biomarker_config: FlexibleBiomarkerConfig,
                                         data_dir: str, config: ExperimentConfig, size: int = 256, batch_size: int = 16) -> Dict[str, float]:
    """Find optimal thresholds by running inference on validation set.

    For every binary biomarker, runs the model over the validation split and
    grid-searches a decision threshold that maximizes the metric configured
    in ``biomarker_config.validation`` (the same search parameters used
    during training). Returns a mapping ``{biomarker_name: threshold}``.

    NOTE(review): relies on the module-level ``device`` defined at import
    time, and on ``ClassifierDataset(train=False)`` selecting the validation
    CSV — presumably val.csv; confirm against the dataset implementation.
    """

    print("Finding optimal thresholds on validation set...")

    # Create validation dataset (use train=False to get val.csv)
    transform = create_test_transforms(config)
    val_dataset = ClassifierDataset(data_dir, biomarker_config, transforms=transform, size=size, train=False)

    val_dataloader = DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True
    )

    # Run inference on validation set, accumulating on CPU to bound GPU memory.
    all_predictions = []
    all_targets = []

    model.eval()
    with torch.no_grad():
        for batch_idx, (images, targets) in enumerate(tqdm(val_dataloader, desc="Validation inference")):
            images = images.to(device)
            targets = targets.to(device)

            # Convert single channel to 3-channel for models expecting RGB (matches train.py validation)
            if images.shape[1] == 1:
                images = images.repeat(1, 3, 1, 1)

            # Forward pass
            predictions = model(images)

            all_predictions.append(predictions.detach().cpu())
            all_targets.append(targets.detach().cpu())

    # Concatenate all predictions and targets into [N, out_dim] tensors.
    all_predictions = torch.cat(all_predictions, dim=0)
    all_targets = torch.cat(all_targets, dim=0)

    # Find optimal thresholds
    optimal_thresholds = {}
    tensor_layout = biomarker_config.get_tensor_layout()

    # Get threshold search parameters from biomarker config (matches training exactly)
    validation_config = biomarker_config.validation
    threshold_range = validation_config.get('threshold_search_range', [0.1, 0.9])
    threshold_steps = validation_config.get('threshold_search_steps', 9)
    optimization_metric = validation_config.get('optimization_metric', 'f1_score')
    fallback_threshold = validation_config.get('fallback_threshold', 0.5)

    print(f"Using threshold search: {threshold_steps} steps from {threshold_range[0]} to {threshold_range[1]}")
    print(f"Optimizing for: {optimization_metric}")

    # Convert to numpy for the vectorized confusion-matrix arithmetic below.
    predictions_np = all_predictions.numpy()
    targets_np = all_targets.numpy()

    for biomarker in biomarker_config.binary_biomarkers:
        layout = tensor_layout[biomarker.name]

        pred_logits = predictions_np[:, layout.start_idx]
        pred_probs = 1 / (1 + np.exp(-pred_logits))  # Sigmoid
        true_labels = targets_np[:, layout.start_idx].astype(int)

        # Skip the search if the validation split contains only one class —
        # every threshold-dependent metric would be degenerate.
        if len(np.unique(true_labels)) < 2:
            optimal_thresholds[biomarker.name] = fallback_threshold
            print(f" {biomarker.name}: Using fallback threshold ({fallback_threshold}) - insufficient label diversity")
            continue

        # Find optimal threshold using the configured metric
        best_threshold = fallback_threshold
        best_score = 0.0

        # Use the EXACT same threshold search parameters as training
        for threshold in np.linspace(threshold_range[0], threshold_range[1], threshold_steps):
            pred_labels = (pred_probs > threshold).astype(int)

            # Confusion-matrix counts for this candidate threshold.
            tp = np.sum((pred_labels == 1) & (true_labels == 1))
            fp = np.sum((pred_labels == 1) & (true_labels == 0))
            fn = np.sum((pred_labels == 0) & (true_labels == 1))
            tn = np.sum((pred_labels == 0) & (true_labels == 0))

            # Calculate metric based on configuration; undefined cases score 0.
            if optimization_metric == 'f1_score' and tp + fp > 0 and tp + fn > 0:
                precision = tp / (tp + fp)
                recall = tp / (tp + fn)
                score = 2 * (precision * recall) / (precision + recall)
            elif optimization_metric == 'accuracy':
                score = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) > 0 else 0.0
            elif optimization_metric == 'precision' and tp + fp > 0:
                score = tp / (tp + fp)
            elif optimization_metric == 'recall' and tp + fn > 0:
                score = tp / (tp + fn)
            elif optimization_metric == 'specificity' and tn + fp > 0:
                score = tn / (tn + fp)
            else:
                score = 0.0  # Fallback

            # Strict '>' keeps the earliest (lowest) threshold on ties.
            if score > best_score:
                best_score = score
                best_threshold = threshold

        optimal_thresholds[biomarker.name] = best_threshold
        print(f" {biomarker.name}: threshold={best_threshold:.3f}, {optimization_metric}={best_score:.3f}")

    return optimal_thresholds
481
+
482
def bootstrap_metric_ci(y_true, y_pred, metric_fn, n_bootstraps=1000, ci=0.95, seed=42):
    """Estimate a bootstrap confidence interval for an arbitrary metric.

    Draws ``n_bootstraps`` resamples (with replacement) of equal size to the
    input, evaluates ``metric_fn(y_true_sample, y_pred_sample)`` on each, and
    returns the ``(lower, upper)`` percentile bounds of the configured
    two-sided ``ci`` level. Resamples with fewer than two distinct labels, or
    where the metric raises / returns NaN, are skipped. Returns
    ``(nan, nan)`` when fewer than 10 valid scores were collected.

    Args:
        y_true: ground-truth labels/values (numpy array).
        y_pred: predictions aligned with ``y_true``.
        metric_fn: callable ``(y_true, y_pred) -> float``.
        n_bootstraps: number of resamples to draw.
        ci: two-sided confidence level, e.g. 0.95.
        seed: RNG seed for reproducible intervals.
    """
    rng = np.random.RandomState(seed)
    scores = []

    for _ in range(n_bootstraps):
        indices = rng.randint(0, len(y_pred), len(y_pred))
        # Single-class resamples make most classification metrics undefined.
        if len(np.unique(y_true[indices])) < 2:
            continue
        # Keep the try body minimal: only the metric evaluation can raise.
        try:
            score = metric_fn(y_true[indices], y_pred[indices])
        except (ValueError, ZeroDivisionError):
            continue
        if not np.isnan(score):
            scores.append(score)

    if len(scores) < 10:  # Need minimum samples for reliable CI
        return np.nan, np.nan

    # np.percentile does not require pre-sorted input, so the previous
    # explicit sort was redundant and has been dropped.
    alpha = (1.0 - ci) / 2.0
    lower = np.percentile(scores, alpha * 100)
    upper = np.percentile(scores, (1.0 - alpha) * 100)
    return lower, upper
505
+
506
def calculate_enhanced_metrics(predictions: torch.Tensor, targets: torch.Tensor,
                              biomarker_config: FlexibleBiomarkerConfig,
                              optimal_thresholds: Dict[str, float] = None) -> Dict[str, Any]:
    """Calculate enhanced metrics with bootstrapped confidence intervals.

    Produces, per binary biomarker: AUROC, precision, recall, specificity,
    F1 and accuracy (each with a bootstrap 95% CI) at either the supplied
    optimal threshold or 0.5; per continuous biomarker: MAE, MSE and R² on
    denormalized values, also with CIs.

    NOTE(review): multiclass biomarkers are NOT scored here — only binary
    and continuous heads appear in the returned dict. Downstream code that
    prints multiclass metrics will simply find no entries.
    """

    # Accept either tensors or arrays; all math below is numpy-based.
    if isinstance(predictions, torch.Tensor):
        predictions = predictions.detach().cpu().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.detach().cpu().numpy()

    all_metrics = {}
    tensor_layout = biomarker_config.get_tensor_layout()

    # Binary classification metrics
    for biomarker in biomarker_config.binary_biomarkers:
        layout = tensor_layout[biomarker.name]

        pred_logits = predictions[:, layout.start_idx]
        pred_probs = 1 / (1 + np.exp(-pred_logits))  # Sigmoid
        true_labels = targets[:, layout.start_idx].astype(int)

        # Skip biomarkers whose test labels are all one class — every metric
        # below would be undefined or meaningless.
        if len(np.unique(true_labels)) < 2:
            continue

        # Threshold from validation search, falling back to 0.5.
        threshold = optimal_thresholds.get(biomarker.name, 0.5) if optimal_thresholds else 0.5
        pred_labels = (pred_probs > threshold).astype(int)

        # Calculate metrics
        metrics = {}

        # AUROC (threshold-independent)
        try:
            auroc = roc_auc_score(true_labels, pred_probs)
            auroc_ci = bootstrap_metric_ci(true_labels, pred_probs, roc_auc_score)
            metrics['auroc'] = auroc
            metrics['auroc_ci'] = auroc_ci
        except (ValueError, ZeroDivisionError):
            metrics['auroc'] = np.nan
            metrics['auroc_ci'] = (np.nan, np.nan)

        # Confusion matrix components
        tp = np.sum((pred_labels == 1) & (true_labels == 1))
        tn = np.sum((pred_labels == 0) & (true_labels == 0))
        fp = np.sum((pred_labels == 1) & (true_labels == 0))
        fn = np.sum((pred_labels == 0) & (true_labels == 1))

        # Precision, Recall, Specificity, F1 — undefined cases score 0.0.
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        specificity = tn / (tn + fp) if (tn + fp) > 0 else 0.0
        f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
        accuracy = (tp + tn) / (tp + tn + fp + fn)

        # Closures over `threshold` so bootstrap resamples re-binarize the
        # probabilities exactly as the point estimates above do.
        def precision_fn(y_true, y_pred):
            pred_binary = (y_pred > threshold).astype(int)
            tp = np.sum((pred_binary == 1) & (y_true == 1))
            fp = np.sum((pred_binary == 1) & (y_true == 0))
            return tp / (tp + fp) if (tp + fp) > 0 else 0.0

        def recall_fn(y_true, y_pred):
            pred_binary = (y_pred > threshold).astype(int)
            tp = np.sum((pred_binary == 1) & (y_true == 1))
            fn = np.sum((pred_binary == 0) & (y_true == 1))
            return tp / (tp + fn) if (tp + fn) > 0 else 0.0

        def specificity_fn(y_true, y_pred):
            pred_binary = (y_pred > threshold).astype(int)
            tn = np.sum((pred_binary == 0) & (y_true == 0))
            fp = np.sum((pred_binary == 1) & (y_true == 0))
            return tn / (tn + fp) if (tn + fp) > 0 else 0.0

        def f1_fn(y_true, y_pred):
            pred_binary = (y_pred > threshold).astype(int)
            tp = np.sum((pred_binary == 1) & (y_true == 1))
            fp = np.sum((pred_binary == 1) & (y_true == 0))
            fn = np.sum((pred_binary == 0) & (y_true == 1))
            prec = tp / (tp + fp) if (tp + fp) > 0 else 0.0
            rec = tp / (tp + fn) if (tp + fn) > 0 else 0.0
            return 2 * (prec * rec) / (prec + rec) if (prec + rec) > 0 else 0.0

        def accuracy_fn(y_true, y_pred):
            pred_binary = (y_pred > threshold).astype(int)
            return (pred_binary == y_true).mean()

        # Bootstrap CIs for the threshold-dependent metrics.
        precision_ci = bootstrap_metric_ci(true_labels, pred_probs, precision_fn)
        recall_ci = bootstrap_metric_ci(true_labels, pred_probs, recall_fn)
        specificity_ci = bootstrap_metric_ci(true_labels, pred_probs, specificity_fn)
        f1_ci = bootstrap_metric_ci(true_labels, pred_probs, f1_fn)
        accuracy_ci = bootstrap_metric_ci(true_labels, pred_probs, accuracy_fn)

        # Store metrics
        metrics.update({
            'precision': precision,
            'precision_ci': precision_ci,
            'recall': recall,
            'recall_ci': recall_ci,
            'specificity': specificity,
            'specificity_ci': specificity_ci,
            'f1_score': f1,
            'f1_score_ci': f1_ci,
            'accuracy': accuracy,
            'accuracy_ci': accuracy_ci,
            'threshold_used': threshold
        })

        all_metrics[biomarker.name] = metrics

    # Regression metrics
    for biomarker in biomarker_config.continuous_biomarkers:
        layout = tensor_layout[biomarker.name]

        pred_values_raw = predictions[:, layout.start_idx]
        true_values_raw = targets[:, layout.start_idx]

        # CRITICAL FIX: Do NOT apply sigmoid to regression predictions!
        # Regression models output raw continuous values, not probabilities
        # The model was trained without sigmoid activation for continuous outputs

        # Denormalize predictions and targets so metrics are in physical units.
        pred_values_denorm = np.array([biomarker.denormalize(val) for val in pred_values_raw])
        true_values_denorm = np.array([biomarker.denormalize(val) for val in true_values_raw])


        # Calculate metrics on denormalized values
        mae = mean_absolute_error(true_values_denorm, pred_values_denorm)
        mse = mean_squared_error(true_values_denorm, pred_values_denorm)
        r2 = r2_score(true_values_denorm, pred_values_denorm)

        # Thin wrappers so bootstrap_metric_ci can resample these metrics.
        def mae_fn(y_true, y_pred):
            return mean_absolute_error(y_true, y_pred)

        def mse_fn(y_true, y_pred):
            return mean_squared_error(y_true, y_pred)

        def r2_fn(y_true, y_pred):
            return r2_score(y_true, y_pred)

        mae_ci = bootstrap_metric_ci(true_values_denorm, pred_values_denorm, mae_fn)
        mse_ci = bootstrap_metric_ci(true_values_denorm, pred_values_denorm, mse_fn)
        r2_ci = bootstrap_metric_ci(true_values_denorm, pred_values_denorm, r2_fn)

        all_metrics[biomarker.name] = {
            'mae': mae,
            'mae_ci': mae_ci,
            'mse': mse,
            'mse_ci': mse_ci,
            'r2_score': r2,
            'r2_score_ci': r2_ci
        }

    return all_metrics
663
+
664
def run_inference(model: torch.nn.Module, test_dataloader: DataLoader,
                 biomarker_config: FlexibleMultiBiomarkerConfig if False else FlexibleBiomarkerConfig, optimal_thresholds: Dict[str, float] = None,
                 only_pred: bool = False) -> Dict[str, Any]:
    """Run inference on the test set and assemble processed results.

    Args:
        model: trained multi-task model (already on ``device``).
        test_dataloader: loader built by :func:`create_test_dataset`.
        biomarker_config: flexible biomarker configuration.
        optimal_thresholds: per-biomarker decision thresholds (optional).
        only_pred: when True the dataset is unlabeled; no metrics computed.

    Returns:
        Dict with per-biomarker predictions, 'STUDY_ID', and (when ground
        truth is available) a 'metrics' entry.
    """
    all_targets = {}
    all_predictions = []
    study_ids = []
    # FIX: use the loader's own batch size instead of the module-global
    # ``args`` so the function works regardless of how it is invoked.
    batch_size = test_dataloader.batch_size

    print("Running inference...")

    model.eval()  # ensure dropout/batchnorm are in inference mode
    with torch.no_grad():
        for batch_idx, batch_data in enumerate(tqdm(test_dataloader)):
            if only_pred:
                images = batch_data
                targets = None
            else:
                images, targets = batch_data

            # Map flat batch positions back to study identifiers.
            batch_study_ids = [test_dataloader.dataset.at(batch_idx * batch_size + i)
                               for i in range(len(images))]

            # BUGFIX: the original stored targets unconditionally, raising
            # NameError in only_pred mode where no targets exist.
            if targets is not None:
                for i, target in enumerate(targets):
                    all_targets[batch_study_ids[i]] = target.cpu().numpy()

            images = images.to(device)

            # Convert single channel to 3-channel for models expecting RGB
            # (matches train.py validation).
            if images.shape[1] == 1:
                images = images.repeat(1, 3, 1, 1)

            # Forward pass
            predictions = model(images)
            all_predictions.append(predictions.cpu())
            study_ids.extend(batch_study_ids)

    # Concatenate all predictions into one [N, out_dim] tensor.
    all_predictions = torch.cat(all_predictions, dim=0)

    # Convert raw outputs into interpretable per-biomarker values.
    processed_results = process_predictions(all_predictions, biomarker_config)
    processed_results['STUDY_ID'] = study_ids

    # Calculate metrics only when ground truth is available.
    if not only_pred and all_targets:
        print("Calculating metrics...")

        # Align predictions with targets by study ID — rows without a stored
        # target are excluded from metric computation.
        target_tensors = []
        prediction_indices = []
        for idx, study_id in enumerate(study_ids):
            if study_id in all_targets:
                target_tensors.append(torch.from_numpy(all_targets[study_id]))
                prediction_indices.append(idx)

        if target_tensors:
            target_tensor = torch.stack(target_tensors).to(device)
            aligned_predictions = all_predictions[prediction_indices].to(device)
            metrics = calculate_enhanced_metrics(aligned_predictions, target_tensor, biomarker_config, optimal_thresholds)
            processed_results['metrics'] = metrics

    return processed_results
733
+
734
def save_results(results: Dict[str, Any], output_dir: str, biomarker_config: FlexibleBiomarkerConfig):
    """Save results to files and print a human-readable summary.

    Writes predictions.csv when ``args.save_predictions`` is set and
    test_metrics.json when ``args.save_metrics`` is set; the on-screen
    summary is always printed when metrics exist.

    NOTE(review): depends on the module-global ``args`` set by ``main()``.
    NOTE(review): ``json.dump`` may fail on numpy scalar/NaN values inside
    the metrics dict — confirm the metric values are JSON-serializable.
    """

    os.makedirs(output_dir, exist_ok=True)

    # Save predictions CSV
    if args.save_predictions:
        # Create DataFrame from results
        df_data = {}

        # Add study IDs
        df_data['STUDY_ID'] = results['STUDY_ID']

        # Add predictions for each biomarker
        for biomarker in biomarker_config.binary_biomarkers:
            df_data[biomarker.name] = results[biomarker.name]

        for biomarker in biomarker_config.multiclass_biomarkers:
            df_data[f"{biomarker.name}_predicted_class"] = results[f"{biomarker.name}_predicted_class"]
            # Save per-class probabilities as separate columns
            probs = results[f"{biomarker.name}_probabilities"]
            for i, class_name in enumerate(biomarker.classes):
                df_data[f"{biomarker.name}_{class_name}_prob"] = probs[:, i]

        for biomarker in biomarker_config.continuous_biomarkers:
            df_data[biomarker.name] = results[biomarker.name]

        df = pd.DataFrame(df_data)
        predictions_path = os.path.join(output_dir, 'predictions.csv')
        df.to_csv(predictions_path, index=False)
        print(f"Predictions saved to: {predictions_path}")

    # Save metrics (only if --save_metrics flag is used)
    if 'metrics' in results and args.save_metrics:
        metrics = results['metrics']

        # Save detailed metrics JSON
        metrics_path = os.path.join(output_dir, 'test_metrics.json')
        with open(metrics_path, 'w') as f:
            json.dump(metrics, f, indent=2)
        print(f"Detailed metrics saved to: {metrics_path}")

    # Print summary metrics (always show, regardless of save_metrics flag)
    if 'metrics' in results:
        metrics = results['metrics']
        print("\n" + "="*60)
        print("TEST RESULTS SUMMARY")
        print("="*60)

        # Binary classification metrics
        if biomarker_config.binary_biomarkers:
            print("\nBinary Classification Metrics (with 95% CI):")
            for biomarker in biomarker_config.binary_biomarkers:
                if biomarker.name in metrics:
                    metric_data = metrics[biomarker.name]
                    print(f" {biomarker.name}:")

                    # AUROC
                    auroc = metric_data.get('auroc', np.nan)
                    auroc_ci = metric_data.get('auroc_ci', (np.nan, np.nan))
                    if not np.isnan(auroc):
                        print(f" AUROC: {auroc:.4f} [{auroc_ci[0]:.4f}, {auroc_ci[1]:.4f}]")

                    # Precision
                    precision = metric_data.get('precision', np.nan)
                    precision_ci = metric_data.get('precision_ci', (np.nan, np.nan))
                    if not np.isnan(precision):
                        print(f" Precision: {precision:.4f} [{precision_ci[0]:.4f}, {precision_ci[1]:.4f}]")

                    # Recall
                    recall = metric_data.get('recall', np.nan)
                    recall_ci = metric_data.get('recall_ci', (np.nan, np.nan))
                    if not np.isnan(recall):
                        print(f" Recall: {recall:.4f} [{recall_ci[0]:.4f}, {recall_ci[1]:.4f}]")

                    # Specificity
                    specificity = metric_data.get('specificity', np.nan)
                    specificity_ci = metric_data.get('specificity_ci', (np.nan, np.nan))
                    if not np.isnan(specificity):
                        print(f" Specificity: {specificity:.4f} [{specificity_ci[0]:.4f}, {specificity_ci[1]:.4f}]")

                    # F1-Score
                    f1 = metric_data.get('f1_score', np.nan)
                    f1_ci = metric_data.get('f1_score_ci', (np.nan, np.nan))
                    if not np.isnan(f1):
                        print(f" F1-Score: {f1:.4f} [{f1_ci[0]:.4f}, {f1_ci[1]:.4f}]")

                    # Accuracy
                    accuracy = metric_data.get('accuracy', np.nan)
                    accuracy_ci = metric_data.get('accuracy_ci', (np.nan, np.nan))
                    if not np.isnan(accuracy):
                        print(f" Accuracy: {accuracy:.4f} [{accuracy_ci[0]:.4f}, {accuracy_ci[1]:.4f}]")

                    # Threshold used
                    threshold = metric_data.get('threshold_used', 'N/A')
                    print(f" Threshold used: {threshold}")

        # Multiclass classification metrics
        # NOTE(review): calculate_enhanced_metrics never populates multiclass
        # entries, so this branch normally prints nothing; if it ever did,
        # formatting the 'N/A' default with :.4f would raise — TODO confirm.
        if biomarker_config.multiclass_biomarkers:
            print("\nMulticlass Classification Metrics:")
            for biomarker in biomarker_config.multiclass_biomarkers:
                if biomarker.name in metrics:
                    metric_data = metrics[biomarker.name]
                    print(f" {biomarker.name}:")
                    print(f" Accuracy: {metric_data.get('accuracy', 'N/A'):.4f}")
                    print(f" F1-Score (macro): {metric_data.get('f1_score_macro', 'N/A'):.4f}")

        # Regression metrics
        if biomarker_config.continuous_biomarkers:
            print("\nRegression Metrics (with 95% CI):")
            for biomarker in biomarker_config.continuous_biomarkers:
                if biomarker.name in metrics:
                    metric_data = metrics[biomarker.name]
                    print(f" {biomarker.name}:")

                    # MAE
                    mae = metric_data.get('mae', np.nan)
                    mae_ci = metric_data.get('mae_ci', (np.nan, np.nan))
                    if not np.isnan(mae):
                        print(f" MAE: {mae:.4f} [{mae_ci[0]:.4f}, {mae_ci[1]:.4f}]")

                    # MSE
                    mse = metric_data.get('mse', np.nan)
                    mse_ci = metric_data.get('mse_ci', (np.nan, np.nan))
                    if not np.isnan(mse):
                        print(f" MSE: {mse:.4f} [{mse_ci[0]:.4f}, {mse_ci[1]:.4f}]")

                    # R²
                    r2 = metric_data.get('r2_score', np.nan)
                    r2_ci = metric_data.get('r2_score_ci', (np.nan, np.nan))
                    if not np.isnan(r2):
                        print(f" R²: {r2:.4f} [{r2_ci[0]:.4f}, {r2_ci[1]:.4f}]")

        # Overall metrics
        # NOTE(review): 'average_auroc'/'median_auroc'/'avg_regression_loss'
        # are not produced by calculate_enhanced_metrics in this file —
        # presumably set by another metrics path; verify before relying on them.
        if 'average_auroc' in metrics and metrics['average_auroc'] > 0:
            print(f"\nOverall Classification Performance:")
            print(f" Average AUROC: {metrics['average_auroc']:.4f}")
            print(f" Median AUROC: {metrics['median_auroc']:.4f}")

        if 'avg_regression_loss' in metrics:
            print(f"\nOverall Regression Performance:")
            print(f" Average Regression Loss: {metrics['avg_regression_loss']:.4f}")

        print("="*60)
878
+
879
def main():
    """Entry point: load checkpoint + config, pick thresholds, run the
    test-set inference pipeline, and save results.

    Sets the module-global ``args`` because several helpers (e.g.
    ``save_results``) read CLI flags directly from it.
    """
    global args
    args = arg_parse()

    print("="*60)
    print("FLEXIBLE MULTI-TASK TESTING")
    print("="*60)

    # Load biomarker configuration
    print(f"Loading biomarker configuration from: {args.biomarker_config}")
    biomarker_config = FlexibleBiomarkerConfig(args.biomarker_config)
    biomarker_config.print_summary()

    # Load checkpoint (optionally remapping legacy state-dict keys)
    checkpoint = load_checkpoint(args.checkpoint_path, legacy_compat=args.legacy_checkpoint_compat)

    # Create model and get the training-time experiment config
    model, config = create_model_from_checkpoint(
        checkpoint,
        biomarker_config,
        legacy_compat=args.legacy_checkpoint_compat
    )

    # Prefer thresholds stored in the checkpoint; otherwise search the
    # validation split (only meaningful when binary biomarkers exist).
    optimal_thresholds = checkpoint.get('optimal_thresholds', {})
    if optimal_thresholds:
        print(f"Loaded optimal thresholds from checkpoint: {optimal_thresholds}")
    else:
        print("No optimal thresholds found in checkpoint.")
        if biomarker_config.binary_biomarkers:
            print("Finding optimal thresholds on validation set...")
            # Use validation data directory if specified, otherwise use same as test data
            val_data_dir = args.val_data_dir if args.val_data_dir else args.data_dir
            optimal_thresholds = find_optimal_thresholds_on_validation(
                model, biomarker_config, val_data_dir, config, args.size, args.batch_size
            )
        else:
            print("No binary biomarkers - skipping threshold optimization")
            optimal_thresholds = {}

    # Create test dataset with preprocessing matching training
    test_dataloader = create_test_dataset(
        args.data_dir,
        biomarker_config,
        config,
        args.size,
        args.only_pred,
        args.test_csv,
        args.batch_size
    )

    # Run inference (metrics are computed unless --only_pred)
    results = run_inference(model, test_dataloader, biomarker_config, optimal_thresholds, args.only_pred)

    # Save results and print summary
    save_results(results, args.output_dir, biomarker_config)

    print(f"\nTesting completed! Results saved to: {args.output_dir}")
937
+
938
# Script entry point: only run the pipeline when executed directly.
if __name__ == "__main__":
    main()
code/train.py ADDED
@@ -0,0 +1,923 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Flexible Training Pipeline for Multi-Task Comorbidity Detection
3
+ Uses the flexible biomarker configuration system for any task structure
4
+ """
5
+
6
+ import os
7
+ import random
8
+ import numpy as np
9
+ import torch
10
+ import torch.optim as optim
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+ from torch.utils.data import DataLoader, WeightedRandomSampler
14
+ from torch.utils.tensorboard import SummaryWriter
15
+ from torchvision import transforms
16
+ from sklearn.utils.class_weight import compute_class_weight
17
+ from tqdm import tqdm
18
+ import json
19
+ import time
20
+ import logging
21
+ import sys
22
+ from argparse import ArgumentParser
23
+ from typing import Dict, List, Tuple, Any
24
+
25
+ # Import our custom modules
26
+ from dataset import ClassifierDataset
27
+ from model.model_factory import ModelFactory
28
+ from model.flexible_multitask_head import FlexibleMultiTaskLoss, FlexibleMetricsCalculator
29
+ from model.gradnorm_loss import GradNormLoss, GradNormTrainer
30
+ from config.biomarker_config import FlexibleBiomarkerConfig
31
+ from config.experiment_config import (
32
+ ExperimentConfig, get_model_defaults, DEFAULT_AUGMENTATIONS,
33
+ parse_augmentation_string, create_optimizer, create_scheduler
34
+ )
35
+ from utils.checkpoints import save_checkpoint, load_checkpoint
36
+
37
+ # Set device
38
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
39
+ print(f"Using device: {device}")
40
+
41
+
42
def set_global_seed(seed: int, deterministic: bool = False) -> None:
    """Seed every RNG used by the pipeline (python, numpy, torch, CUDA).

    When ``deterministic`` is True, cuDNN is forced onto deterministic
    kernels (bit-exact runs, slower); otherwise autotuned benchmarking is
    enabled for speed.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    # The two cuDNN flags are always set in opposition.
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = not deterministic
57
+
58
def normalize_fine_tuning_strategy(strategy: str) -> str:
    """Map a user-supplied fine-tuning strategy string onto a canonical name.

    Case, surrounding whitespace, hyphens, and spaces are ignored. Any
    spelling of linear probing yields ``"linear_probe"``; everything else
    falls back to ``"full"`` fine-tuning.
    """
    canonical = strategy.strip().lower()
    for separator in ("-", " "):
        canonical = canonical.replace(separator, "_")
    return "linear_probe" if canonical in ("linear_probe", "linearprobe") else "full"
64
+
65
+
66
def compute_class_weights_for_dataset(dataset, biomarker_config: FlexibleBiomarkerConfig):
    """Compute per-biomarker positive-class weights for balanced BCE training.

    For each binary biomarker, derives sklearn 'balanced' class weights from
    the dataset's target matrix and returns the positive/negative weight
    ratio suitable for ``BCEWithLogitsLoss(pos_weight=...)``. Biomarkers
    whose labels contain a single class get a neutral weight of 1.0.

    Args:
        dataset: dataset exposing a ``targets`` array of shape [N, out_dim].
        biomarker_config: flexible biomarker configuration.

    Returns:
        Dict mapping biomarker name -> positive-class weight (float).
    """
    class_weights = {}

    # Get all target tensors
    all_targets = dataset.targets

    # FIX: hoist the layout lookup out of the loop — get_tensor_layout()
    # was previously recomputed for every biomarker.
    tensor_layout = biomarker_config.get_tensor_layout()

    # Compute weights for binary tasks
    for biomarker in biomarker_config.binary_biomarkers:
        layout = tensor_layout[biomarker.name]
        labels = all_targets[:, layout.start_idx]

        unique_classes = np.unique(labels)
        if len(unique_classes) > 1:
            weights = compute_class_weight('balanced', classes=unique_classes, y=labels)
            # Use positive/negative weight ratio as pos_weight for BCE
            pos_weight = weights[1] / weights[0] if len(weights) > 1 else 1.0
            class_weights[biomarker.name] = pos_weight
        else:
            # Single-class labels: no meaningful rebalancing possible.
            class_weights[biomarker.name] = 1.0

    return class_weights
88
+
89
+
90
def create_data_transforms(config: "ExperimentConfig", is_training=True):
    """Build torchvision transform pipelines for training or evaluation.

    Augmentation strength is adapted to the pre-training source: RadImageNet
    models get very conservative augmentations (medical-domain features are
    fragile), while ImageNet / from-scratch models get moderately stronger
    ones. Both pipelines end with ToTensor -> grayscale-to-RGB replication
    -> per-channel normalization.

    Args:
        config: Experiment configuration (augmentation string + pretrained
            weights source).
        is_training: When True include stochastic augmentations; otherwise
            return the deterministic eval pipeline.

    Returns:
        A ``transforms.Compose`` pipeline.
    """
    aug_params = parse_augmentation_string(config.image_augmentations)

    transform_list = []
    if is_training:
        if config.pretrained_weights == "RadImageNet":
            # Very conservative augmentation for the medical-imaging domain.
            transform_list.extend(_stochastic_augmentations(
                aug_params,
                flip_p=0.2, rotation_divisor=3,
                crop_scale=(0.95, 1.0), crop_ratio=(0.9, 1.1),
                geometric_p=0.4, jitter_factor=0.3, jitter_p=0.3,
            ))
        else:
            # ImageNet or non-pretrained: moderately stronger augmentation.
            transform_list.extend(_stochastic_augmentations(
                aug_params,
                flip_p=0.3, rotation_divisor=2,
                crop_scale=(0.9, 1.0), crop_ratio=(0.8, 1.2),
                geometric_p=0.6, jitter_factor=0.5, jitter_p=0.4,
            ))

    transform_list.append(transforms.ToTensor())

    # Pre-trained backbones expect 3-channel input; replicate grayscale.
    # Guard on channel count so an already-RGB image is not expanded to 9
    # channels (which would crash the 3-channel Normalize below).
    transform_list.append(transforms.Lambda(
        lambda x: x.repeat(3, 1, 1) if x.shape[0] == 1 else x
    ))

    transform_list.extend(_normalization_transforms(config, aug_params))

    return transforms.Compose(transform_list)


def _stochastic_augmentations(aug_params, *, flip_p, rotation_divisor,
                              crop_scale, crop_ratio, geometric_p,
                              jitter_factor, jitter_p):
    """Return the training-time augmentation transforms for one strength preset."""
    augs = []

    if aug_params['horizontal_flip']:
        # Low flip probability preserves anatomical left/right consistency.
        augs.append(transforms.RandomHorizontalFlip(p=flip_p))

    # Geometric augmentations are grouped under RandomApply so they are
    # applied together with a single probability, preventing over-augmentation.
    geometric_augs = []
    if aug_params['rotation'] > 0:
        # Scale down the configured rotation for medical images.
        geometric_augs.append(transforms.RandomRotation(
            degrees=aug_params['rotation'] // rotation_divisor))
    if aug_params['random_crop']:
        # Gentle cropping to keep anatomical features in frame.
        geometric_augs.append(transforms.RandomResizedCrop(
            256, scale=crop_scale, ratio=crop_ratio))
    if geometric_augs:
        augs.append(transforms.RandomApply(geometric_augs, p=geometric_p))

    if aug_params['color_jitter']:
        # Reduced-intensity brightness/contrast jitter for domain adaptation.
        augs.append(transforms.RandomApply([
            transforms.ColorJitter(
                brightness=aug_params['brightness'] * jitter_factor,
                contrast=aug_params['contrast'] * jitter_factor,
            )
        ], p=jitter_p))

    return augs


def _normalization_transforms(config, aug_params):
    """Return the per-channel normalization matching the pre-training source."""
    if not aug_params['imagenet_norm']:
        return []
    if config.pretrained_weights in ("ImageNet", "RadImageNet"):
        # ImageNet statistics; also used as a fallback for RadImageNet since
        # its dataset-specific stats are not published here — TODO confirm
        # and substitute actual RadImageNet statistics if available.
        return [transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )]
    # CT-specific statistics for models trained from scratch.
    return [transforms.Normalize(
        mean=[0.55001191, 0.55001191, 0.55001191],
        std=[0.18854326, 0.18854326, 0.18854326]
    )]
225
+
226
+
227
def setup_logging(output_dir: str, experiment_name: str):
    """Configure experiment-wide and training-progress loggers.

    Creates ``<output_dir>/logs`` and wires two loggers:
      * ``experiment`` -> detailed file log + console
      * ``training``   -> progress file log + the same console handler

    Args:
        output_dir: Root output directory for this run.
        experiment_name: Human-readable run name, logged at startup.

    Returns:
        Tuple of (experiment logger, training logger).
    """
    logs_dir = os.path.join(output_dir, 'logs')
    os.makedirs(logs_dir, exist_ok=True)

    detailed_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    simple_formatter = logging.Formatter('%(asctime)s - %(message)s')

    # One console handler shared by both loggers.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(simple_formatter)

    def _file_handler(filename, formatter):
        # INFO-level file handler rooted in the logs directory.
        handler = logging.FileHandler(os.path.join(logs_dir, filename))
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        return handler

    # Main experiment logger: detailed file log + console.
    logger = logging.getLogger('experiment')
    logger.setLevel(logging.INFO)
    logger.handlers.clear()  # avoid duplicate handlers on repeated setup
    logger.addHandler(_file_handler('experiment_detailed.log', detailed_formatter))
    logger.addHandler(console_handler)

    # Training-progress logger: its own file log + the shared console handler.
    training_logger = logging.getLogger('training')
    training_logger.setLevel(logging.INFO)
    training_logger.handlers.clear()
    training_logger.addHandler(_file_handler('training_progress.log', simple_formatter))
    training_logger.addHandler(console_handler)

    logger.info(f"Starting experiment: {experiment_name}")
    logger.info(f"Output directory: {output_dir}")
    logger.info(f"Logs directory: {logs_dir}")

    return logger, training_logger
277
+
278
+
279
def create_balanced_sampler(dataset, biomarker_config: "FlexibleBiomarkerConfig"):
    """Create a WeightedRandomSampler that oversamples rare binary classes.

    Each sample's weight is the product, across all binary biomarkers, of the
    inverse-frequency weight of its class for that biomarker.

    Args:
        dataset: Dataset exposing ``targets`` (n_samples x output size) and
            supporting ``len()``.
        biomarker_config: Biomarker configuration with tensor layout.

    Returns:
        ``WeightedRandomSampler`` drawing ``len(dataset)`` samples with
        replacement.
    """
    all_targets = dataset.targets
    layout_map = biomarker_config.get_tensor_layout()

    sample_weights = np.ones(len(dataset), dtype=np.float64)

    for biomarker in biomarker_config.binary_biomarkers:
        labels = all_targets[:, layout_map[biomarker.name].start_idx]

        unique, counts = np.unique(labels, return_counts=True)
        class_weights = len(labels) / (len(unique) * counts)

        # Map each class VALUE to its weight. Indexing the weight array with
        # the raw label (as before: class_weights[int(label)]) breaks whenever
        # the observed classes are not exactly {0, 1, ..., k-1} — e.g. a
        # split in which only the positive class occurs raised IndexError.
        weight_by_class = dict(zip(unique, class_weights))
        sample_weights *= np.array([weight_by_class[label] for label in labels])

    # DoubleTensor matches WeightedRandomSampler expectations and avoids
    # precision loss when many weights are multiplied together.
    sample_weights_tensor = torch.from_numpy(sample_weights).double()

    return WeightedRandomSampler(sample_weights_tensor, len(sample_weights_tensor), replacement=True)
302
+
303
+
304
def train_epoch(model, dataloader, criterion, optimizer, device, metrics_calc, gradnorm_trainer=None):
    """Run one training epoch.

    Args:
        model: Network being trained (put into train mode here).
        dataloader: Training batches yielding (images, targets).
        criterion: Multi-task loss returning (total loss, per-task loss dict).
        optimizer: Optimizer stepped once per batch.
        device: Device to move batches onto.
        metrics_calc: Calculator producing epoch-level metrics.
        gradnorm_trainer: Optional GradNorm wrapper; when provided, its
            ``compute_loss`` replaces ``criterion`` for adaptive balancing.

    Returns:
        Tuple of (average loss, metrics dict, averaged loss components).
    """
    model.train()

    running_loss = 0.0
    prediction_chunks = []
    target_chunks = []
    loss_components = {'total_loss': 0}

    for images, targets in tqdm(dataloader, desc="Training"):
        images = images.to(device)
        targets = targets.to(device)

        # Backbones pre-trained on RGB expect 3 channels; tile grayscale input.
        if images.shape[1] == 1:
            images = images.repeat(1, 3, 1, 1)

        predictions = model(images)

        # GradNorm (when enabled) balances per-task gradients itself.
        if gradnorm_trainer is None:
            loss, loss_dict = criterion(predictions, targets)
        else:
            loss, loss_dict = gradnorm_trainer.compute_loss(model, predictions, targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        for key, value in loss_dict.items():
            loss_components[key] = loss_components.get(key, 0) + value

        # Keep CPU copies so epoch-level metrics can be computed at the end.
        prediction_chunks.append(predictions.detach().cpu())
        target_chunks.append(targets.detach().cpu())

    epoch_predictions = torch.cat(prediction_chunks, dim=0)
    epoch_targets = torch.cat(target_chunks, dim=0)
    metrics = metrics_calc.calculate_all_metrics(epoch_predictions, epoch_targets)

    num_batches = len(dataloader)
    avg_loss = running_loss / num_batches
    loss_components = {key: value / num_batches for key, value in loss_components.items()}

    return avg_loss, metrics, loss_components
358
+
359
+
360
def validate_epoch(model, dataloader, criterion, device, metrics_calc):
    """Run one validation epoch without gradient updates.

    Also refreshes the metric calculator's optimal decision thresholds from
    this epoch's validation predictions before computing metrics.

    Args:
        model: Network to evaluate (put into eval mode here).
        dataloader: Validation batches yielding (images, targets).
        criterion: Multi-task loss returning (total loss, per-task loss dict).
        device: Device to move batches onto.
        metrics_calc: Calculator producing epoch-level metrics; its optimal
            thresholds are updated in place.

    Returns:
        Tuple of (average loss, metrics dict, averaged loss components).
    """
    model.eval()

    running_loss = 0.0
    prediction_chunks = []
    target_chunks = []
    loss_components = {'total_loss': 0}

    with torch.no_grad():
        for images, targets in tqdm(dataloader, desc="Validation"):
            images = images.to(device)
            targets = targets.to(device)

            # Backbones pre-trained on RGB expect 3 channels; tile grayscale input.
            if images.shape[1] == 1:
                images = images.repeat(1, 3, 1, 1)

            predictions = model(images)
            loss, loss_dict = criterion(predictions, targets)

            running_loss += loss.item()
            for key, value in loss_dict.items():
                loss_components[key] = loss_components.get(key, 0) + value

            # Keep CPU copies so epoch-level metrics can be computed at the end.
            prediction_chunks.append(predictions.detach().cpu())
            target_chunks.append(targets.detach().cpu())

    epoch_predictions = torch.cat(prediction_chunks, dim=0)
    epoch_targets = torch.cat(target_chunks, dim=0)

    # Fit optimal decision thresholds on validation data, then score with them.
    metrics_calc.update_optimal_thresholds(epoch_predictions, epoch_targets)
    metrics = metrics_calc.calculate_all_metrics(epoch_predictions, epoch_targets)

    num_batches = len(dataloader)
    avg_loss = running_loss / num_batches
    loss_components = {key: value / num_batches for key, value in loss_components.items()}

    return avg_loss, metrics, loss_components
411
+
412
+
413
def train_model(
    config: "ExperimentConfig",
    data_dir: str,
    output_dir: str,
    biomarker_config: "FlexibleBiomarkerConfig",
    epochs: int = 100,
    seed: int = 42,
):
    """Train a flexible multi-task model and checkpoint the best epoch.

    Classification scenarios select the best model on validation median
    AUROC; continuous-only scenarios — or classification epochs for which no
    AUROC is available — select on mean validation MAE. Early stopping uses a
    fixed patience of 10 epochs without improvement.

    Args:
        config: Experiment configuration (model, optimizer, augmentations...).
        data_dir: Dataset root directory.
        output_dir: Directory for checkpoints, logs, and TensorBoard files.
        biomarker_config: Biomarker layout / target configuration.
        epochs: Maximum number of training epochs.
        seed: Seed for DataLoader shuffling and worker RNGs.

    Returns:
        Tuple of (trained model, best validation score), where the score is
        median AUROC for classification runs, MAE for continuous runs, or
        0.0 when no meaningful metric was available.
    """

    # Safety check: never overwrite a directory that already holds results;
    # fall back to a timestamped sibling directory instead.
    if os.path.exists(output_dir):
        important_files = ['best_checkpoint.pth', 'config.json', 'experiment_results.csv']
        has_important_files = any(os.path.exists(os.path.join(output_dir, f)) for f in important_files)

        if has_important_files:
            import datetime
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            original_output_dir = output_dir
            output_dir = f"{output_dir}_{timestamp}"
            print(f"Warning: Output directory {original_output_dir} exists with important files.")
            print(f"Using new directory: {output_dir}")

    os.makedirs(output_dir, exist_ok=True)

    # Logging + TensorBoard.
    logger, training_logger = setup_logging(output_dir, config.experiment_name)
    writer = SummaryWriter(log_dir=os.path.join(output_dir, 'tensorboard'))

    # Persist both configurations next to the checkpoints for reproducibility.
    config_file = os.path.join(output_dir, 'config.json')
    with open(config_file, 'w') as f:
        json.dump(config.to_dict(), f, indent=2)
    logger.info(f"Configuration saved to: {config_file}")

    biomarker_config_file = os.path.join(output_dir, 'biomarker_config.json')
    biomarker_config.save_to_file(biomarker_config_file)
    logger.info(f"Biomarker configuration saved to: {biomarker_config_file}")

    logger.info(f"Model: {config.model}")
    logger.info(f"Single-target strategy: {config.single_target_strategy}")
    logger.info(f"Multi-target strategy: {config.multi_target_strategy}")
    logger.info(f"Expected GPU memory: {config.expected_gpu_memory}")
    logger.info(f"Training epochs: {epochs}")
    logger.info(f"Data directory: {data_dir}")
    logger.info(f"Biomarker configuration: {biomarker_config.experiment_name}")
    logger.info(f"Total output size: {biomarker_config.total_output_size}")
    logger.info(f"Seed: {seed}")

    # Data transforms and datasets.
    train_transform = create_data_transforms(config, is_training=True)
    val_transform = create_data_transforms(config, is_training=False)

    logger.info("Loading datasets...")
    train_dataset = ClassifierDataset(
        data_dir, biomarker_config, transforms=train_transform,
        size=256, train=True
    )
    val_dataset = ClassifierDataset(
        data_dir, biomarker_config, transforms=val_transform,
        size=256, train=False
    )

    logger.info(f"Train dataset size: {len(train_dataset)}")
    logger.info(f"Validation dataset size: {len(val_dataset)}")

    # Optional per-biomarker class weighting for the loss.
    class_weights = None
    if config.class_weighting == 'inverse_frequency':
        logger.info("Computing class weights...")
        class_weights = compute_class_weights_for_dataset(train_dataset, biomarker_config)
        logger.info(f"Class weights computed for {len(class_weights)} binary biomarkers")

    # Seeded DataLoader generator + worker seeding for reproducible shuffling.
    data_loader_generator = torch.Generator()
    data_loader_generator.manual_seed(seed)

    def _seed_worker(worker_id: int) -> None:
        # Each worker gets a distinct but deterministic seed.
        worker_seed = seed + worker_id
        np.random.seed(worker_seed)
        random.seed(worker_seed)
        torch.manual_seed(worker_seed)

    if config.sampling_strategy == 'balanced_batch':
        train_sampler = create_balanced_sampler(train_dataset, biomarker_config)
        train_loader = DataLoader(
            train_dataset, batch_size=config.batch_size,
            sampler=train_sampler, num_workers=8, pin_memory=True,
            worker_init_fn=_seed_worker, generator=data_loader_generator
        )
    else:
        train_loader = DataLoader(
            train_dataset, batch_size=config.batch_size,
            shuffle=True, num_workers=8, pin_memory=True,
            worker_init_fn=_seed_worker, generator=data_loader_generator
        )

    val_loader = DataLoader(
        val_dataset, batch_size=config.batch_size,
        shuffle=False, num_workers=8, pin_memory=True,
        worker_init_fn=_seed_worker, generator=data_loader_generator
    )

    # Model.
    logger.info("Creating model...")
    model = ModelFactory.create_model(
        architecture=config.model,
        num_classes=biomarker_config.total_output_size,
        pretrained_weights=config.pretrained_weights,
        fine_tuning_strategy=config.fine_tuning_strategy,
        dropout=config.dropout,
        biomarker_config=biomarker_config,
        single_target_strategy=config.single_target_strategy
    )
    model = model.to(device)

    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"Total parameters: {total_params:,}")
    logger.info(f"Trainable parameters: {trainable_params:,}")

    # Loss: GradNorm adaptive balancing when enabled, otherwise the plain
    # flexible multi-task loss.
    use_gradnorm = getattr(config, 'use_gradnorm', False)
    gradnorm_trainer = None

    if use_gradnorm:
        logger.info("Using GradNorm for loss balancing")
        gradnorm_alpha = getattr(config, 'gradnorm_alpha', 0.16)
        gradnorm_update_freq = getattr(config, 'gradnorm_update_freq', 10)

        gradnorm_loss = GradNormLoss(
            biomarker_config=biomarker_config,
            class_weights=class_weights,
            alpha=gradnorm_alpha,
            update_weights_every=gradnorm_update_freq,
            initial_task_loss_average_window=20,
            normalize_losses=True,
            restoring_force_factor=0.1
        )
        gradnorm_loss = gradnorm_loss.to(device)  # Move to correct device
        gradnorm_trainer = GradNormTrainer(gradnorm_loss)
        criterion = gradnorm_loss  # Used directly (without GradNorm) for validation
    else:
        criterion = FlexibleMultiTaskLoss(biomarker_config, class_weights=class_weights)

    # Optimizer and LR scheduler.
    # Note: GradNorm task weights are updated manually, not through optimizer.
    optimizer = create_optimizer(model.parameters(), config)
    scheduler = create_scheduler(optimizer, config, epochs)

    metrics_calc = FlexibleMetricsCalculator(biomarker_config)

    # Best-model tracking and early stopping.
    best_median_auroc = 0.0
    best_mae = float('inf')  # MAE tracking for continuous / fallback selection
    best_epoch = 0
    patience = 10
    patience_counter = 0

    # Determine task types for logging and best-model selection.
    has_classification_tasks = (len(biomarker_config.binary_biomarkers) > 0 or
                                len(biomarker_config.multiclass_biomarkers) > 0)
    has_continuous_tasks = len(biomarker_config.continuous_biomarkers) > 0

    logger.info(f"Starting training for {epochs} epochs with early stopping (patience: {patience})")
    if has_classification_tasks and has_continuous_tasks:
        logger.info("Multi-task training: Classification + Regression")
    elif has_classification_tasks:
        logger.info("Classification training: Using median AUROC for best model selection")
    elif has_continuous_tasks:
        logger.info("Regression training: Using MAE for best model selection")

    for epoch in range(epochs):
        epoch_start_time = time.time()
        training_logger.info(f"Starting Epoch {epoch+1}/{epochs}")

        train_loss, train_metrics, train_loss_components = train_epoch(
            model, train_loader, criterion, optimizer, device, metrics_calc, gradnorm_trainer
        )

        val_loss, val_metrics, val_loss_components = validate_epoch(
            model, val_loader, criterion, device, metrics_calc
        )

        # ReduceLROnPlateau needs the monitored value; other schedulers don't.
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(val_loss)
        else:
            scheduler.step()

        epoch_time = time.time() - epoch_start_time

        # TensorBoard scalars.
        writer.add_scalar('Loss/Train', train_loss, epoch)
        writer.add_scalar('Loss/Validation', val_loss, epoch)
        writer.add_scalar('Metrics/Average_AUROC_Train', train_metrics['average_auroc'], epoch)
        writer.add_scalar('Metrics/Average_AUROC_Val', val_metrics['average_auroc'], epoch)
        writer.add_scalar('Metrics/Median_AUROC_Train', train_metrics['median_auroc'], epoch)
        writer.add_scalar('Metrics/Median_AUROC_Val', val_metrics['median_auroc'], epoch)
        writer.add_scalar('Learning_Rate', optimizer.param_groups[0]['lr'], epoch)

        # GradNorm task-weight tracking.
        if gradnorm_trainer is not None:
            gradnorm_stats = gradnorm_trainer.get_training_stats()
            task_weights = gradnorm_stats['task_weights']
            for task_name, weight in task_weights.items():
                writer.add_scalar(f'GradNorm_Weights/{task_name}', weight, epoch)
            if gradnorm_stats['initial_losses_computed']:
                training_logger.info(f"GradNorm task weights: {task_weights}")

        # Per-biomarker AUROC curves.
        for biomarker in biomarker_config.binary_biomarkers:
            if biomarker.name in train_metrics and biomarker.name in val_metrics:
                train_biomarker_metrics = train_metrics[biomarker.name]
                val_biomarker_metrics = val_metrics[biomarker.name]
                if 'auroc' in train_biomarker_metrics and 'auroc' in val_biomarker_metrics:
                    writer.add_scalar(f'AUROC_Train/{biomarker.name}', train_biomarker_metrics['auroc'], epoch)
                    writer.add_scalar(f'AUROC_Val/{biomarker.name}', val_biomarker_metrics['auroc'], epoch)

        # Epoch summary.
        training_logger.info(f"Epoch {epoch+1} completed in {epoch_time:.2f}s")
        training_logger.info(f"Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}")
        training_logger.info(f"Train Avg AUROC: {train_metrics['average_auroc']:.4f}, Val Avg AUROC: {val_metrics['average_auroc']:.4f}")
        training_logger.info(f"Train Median AUROC: {train_metrics['median_auroc']:.4f}, Val Median AUROC: {val_metrics['median_auroc']:.4f}")

        training_logger.info("Validation metrics per biomarker:")
        for biomarker in biomarker_config.binary_biomarkers:
            if biomarker.name in val_metrics:
                biomarker_metrics = val_metrics[biomarker.name]
                if 'auroc' in biomarker_metrics and 'accuracy' in biomarker_metrics:
                    training_logger.info(f"  {biomarker.name}: AUROC={biomarker_metrics['auroc']:.4f}, "
                                         f"Acc={biomarker_metrics['accuracy']:.4f}, "
                                         f"F1={biomarker_metrics.get('f1', 0.0):.4f}")

        for biomarker in biomarker_config.multiclass_biomarkers:
            if biomarker.name in val_metrics:
                biomarker_metrics = val_metrics[biomarker.name]
                if 'accuracy' in biomarker_metrics:
                    training_logger.info(f"  {biomarker.name}: Acc={biomarker_metrics['accuracy']:.4f}, "
                                         f"F1={biomarker_metrics.get('f1_weighted', 0.0):.4f}")

        for biomarker in biomarker_config.continuous_biomarkers:
            if biomarker.name in val_metrics:
                biomarker_metrics = val_metrics[biomarker.name]
                if 'mse' in biomarker_metrics:
                    training_logger.info(f"  {biomarker.name}: MSE={biomarker_metrics['mse']:.4f}, "
                                         f"MAE={biomarker_metrics.get('mae', 0.0):.4f}")

        # --- Best-model selection -------------------------------------------
        # Classification scenarios select on validation median AUROC.
        # Continuous-only scenarios — or classification epochs with no usable
        # AUROC — fall back to mean MAE across continuous biomarkers.
        # ``is_best`` is initialized every epoch so it can never be read
        # unbound (the previous implementation left it undefined when the
        # AUROC-to-MAE fallback path was taken, crashing with NameError).
        is_best = False
        stop_early = False
        use_auroc = has_classification_tasks and val_metrics['median_auroc'] > 0.0

        if use_auroc:
            if val_metrics['median_auroc'] > best_median_auroc:
                is_best = True
                best_median_auroc = val_metrics['median_auroc']
                best_epoch = epoch + 1
                patience_counter = 0
                training_logger.info(f"New best model! Median AUROC: {best_median_auroc:.4f} (Avg: {val_metrics['average_auroc']:.4f})")
            else:
                patience_counter += 1
                training_logger.info(f"No improvement. Patience: {patience_counter}/{patience}")
            stop_early = patience_counter >= patience

        elif has_continuous_tasks:
            if has_classification_tasks:
                training_logger.warning("No AUROC values available for classification tasks, falling back to MAE for model selection")

            mae_values = []
            for biomarker in biomarker_config.continuous_biomarkers:
                if biomarker.name in val_metrics:
                    mae = val_metrics[biomarker.name].get('mae', float('inf'))
                    mae_values.append(mae)
                    training_logger.info(f"  {biomarker.name}: MAE={mae:.4f}")

            if mae_values:
                current_mae = np.mean(mae_values)
                if current_mae < best_mae:
                    is_best = True
                    best_mae = current_mae
                    best_epoch = epoch + 1
                    patience_counter = 0
                    training_logger.info(f"New best model! Average MAE: {best_mae:.4f}")
                else:
                    patience_counter += 1
                    training_logger.info(f"No improvement. Current MAE: {current_mae:.4f}, Best MAE: {best_mae:.4f}. Patience: {patience_counter}/{patience}")
                stop_early = patience_counter >= patience
            else:
                patience_counter += 1
                training_logger.warning("No MAE values available for continuous biomarkers")

        elif has_classification_tasks:
            # Classification-only run, but no AUROC could be computed this epoch.
            patience_counter += 1
            training_logger.warning("No AUROC values available and no continuous tasks - cannot determine best model")

        else:
            # No biomarkers configured - should not happen.
            training_logger.error("No biomarkers configured!")
            break

        if stop_early:
            # NOTE: matching previous behavior, the early-stop epoch's
            # checkpoint is not written before exiting the loop.
            training_logger.info("Early stopping triggered!")
            break

        # Persist full training state each epoch (latest + best copies).
        checkpoint = {
            'epoch': epoch + 1,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'train_loss': train_loss,
            'val_loss': val_loss,
            'train_metrics': train_metrics,
            'val_metrics': val_metrics,
            'config': config.to_dict(),
            'biomarker_config': biomarker_config.experiment_name,
            'best_median_auroc': best_median_auroc,
            'best_mae': best_mae,
            'best_epoch': best_epoch,
            'optimal_thresholds': metrics_calc.optimal_thresholds  # Save optimal thresholds
        }

        torch.save(checkpoint, os.path.join(output_dir, 'latest_checkpoint.pth'))
        if is_best:
            torch.save(checkpoint, os.path.join(output_dir, 'best_checkpoint.pth'))

    writer.close()

    # Final logging based on task type and the metric actually used.
    if has_classification_tasks and best_median_auroc > 0.0:
        logger.info(f"Training completed! Best model at epoch {best_epoch} with Median AUROC: {best_median_auroc:.4f}")
        return model, best_median_auroc
    elif has_continuous_tasks:
        logger.info(f"Training completed! Best model at epoch {best_epoch} with MAE: {best_mae:.4f}")
        return model, best_mae
    else:
        logger.info(f"Training completed! Best model at epoch {best_epoch} (no meaningful metrics available)")
        return model, 0.0
788
+
789
+
790
def main() -> None:
    """CLI entry point: parse arguments, resolve per-model defaults, build the
    experiment configuration, and launch training."""
    parser = ArgumentParser(description='Flexible Multi-Task Training')

    # --- Required arguments ---
    parser.add_argument('--model', required=True,
                        help='Model architecture to train (e.g. "ResNet-18", "ViT-Small (DINOv2)"). '
                             'See README for the full list of supported models.')
    parser.add_argument('--data_dir', required=True,
                        help='Path to dataset directory. Must contain train.csv, val.csv, and a data/ subfolder with PNG images.')
    parser.add_argument('--biomarker_config', required=True,
                        help='Path to biomarker configuration file (YAML or JSON). '
                             'See config/biomarker_config_multitask_example.yaml for the full multi-task config used in the paper.')

    # --- Output ---
    parser.add_argument('--output_dir', default='./outputs',
                        help='Directory to save checkpoints, logs, and TensorBoard (default: ./outputs)')
    parser.add_argument('--experiment_name',
                        help='Custom name for this run. Auto-generated from model/lr/batch if not provided.')

    # --- Training hyperparameters (defaults match the published experiments) ---
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of training epochs (default: 100)')
    parser.add_argument('--learning_rate', type=float, default=1e-4,
                        help='Learning rate (default: 1e-4)')
    parser.add_argument('--batch_size', type=int, default=16,
                        help='Batch size (default: 16)')
    parser.add_argument('--weight_decay', type=float, default=1e-4,
                        help='Weight decay (default: 1e-4)')
    parser.add_argument('--optimizer', default='AdamW', choices=['AdamW', 'Adam', 'SGD'],
                        help='Optimizer (default: AdamW)')
    parser.add_argument('--scheduler', default='CosineAnnealing',
                        choices=['CosineAnnealing', 'CosineAnnealingWarmRestarts',
                                 'ReduceLROnPlateau', 'StepLR', 'ExponentialLR'],
                        help='LR scheduler (default: CosineAnnealing)')
    parser.add_argument('--dropout', type=float, default=0.2,
                        help='Dropout rate (default: 0.2)')
    parser.add_argument('--class_weighting', default='inverse_frequency',
                        choices=['inverse_frequency', 'none'],
                        help='Class weighting strategy (default: inverse_frequency)')
    parser.add_argument('--sampling_strategy', default='balanced_batch',
                        choices=['balanced_batch', 'random'],
                        help='Sampling strategy for training DataLoader (default: balanced_batch)')
    parser.add_argument('--fine_tuning_strategy', default='Full fine-tuning',
                        choices=['Full fine-tuning', 'full', 'linear_probe'],
                        help='Fine-tuning strategy (default: Full fine-tuning)')
    parser.add_argument('--seed', type=int, default=42,
                        help='Global random seed for reproducibility (default: 42)')
    parser.add_argument('--deterministic', action='store_true',
                        help='Enable deterministic backend behavior (can reduce throughput)')

    # --- Per-model defaults (auto-detected from model name if not specified) ---
    parser.add_argument('--pretrained_weights',
                        help='Pretrained weights to use. Auto-detected from --model if not provided.')
    parser.add_argument('--single_target_strategy',
                        help='Single-target strategy. Auto-detected from --model if not provided.')

    # --- GradNorm ---
    parser.add_argument('--use_gradnorm', action='store_true',
                        help='Enable GradNorm adaptive loss balancing')
    parser.add_argument('--gradnorm_alpha', type=float, default=0.16,
                        help='GradNorm restoring force strength (default: 0.16)')
    parser.add_argument('--gradnorm_update_freq', type=int, default=10,
                        help='Update GradNorm weights every N iterations (default: 10)')

    args = parser.parse_args()
    # Seed all RNGs before any data loading or model construction.
    set_global_seed(args.seed, deterministic=args.deterministic)

    # Load biomarker configuration
    print(f"Loading biomarker configuration from: {args.biomarker_config}")
    biomarker_config = FlexibleBiomarkerConfig(args.biomarker_config)

    print("Biomarker configuration loaded:")
    biomarker_config.print_summary()

    # Resolve per-model defaults for pretrained_weights and single_target_strategy
    model_defaults = get_model_defaults(args.model)
    pretrained_weights = args.pretrained_weights or model_defaults['pretrained_weights']
    single_target_strategy = args.single_target_strategy or model_defaults['single_target_strategy']
    fine_tuning_strategy = normalize_fine_tuning_strategy(args.fine_tuning_strategy)

    # Build ExperimentConfig directly from CLI args
    config = ExperimentConfig(
        model=args.model,
        loss_function='CE',
        must_include=True,
        learning_rate=[args.learning_rate],
        batch_size=args.batch_size,
        weight_decay=args.weight_decay,
        optimizer=args.optimizer,
        scheduler=args.scheduler,
        image_augmentations=DEFAULT_AUGMENTATIONS.copy(),
        dropout=args.dropout,
        loss_specific_params='class_weights=inverse_frequency',
        multi_target_strategy='Shared backbone + task-specific heads',
        single_target_strategy=single_target_strategy,
        pretrained_weights=pretrained_weights,
        fine_tuning_strategy=fine_tuning_strategy,
        expected_gpu_memory='',
        architectural_family='',
        class_weighting=args.class_weighting,
        sampling_strategy=args.sampling_strategy,
        threshold_selection='F1_optimal',
        experiment_name=args.experiment_name or '',
        use_gradnorm=args.use_gradnorm,
        gradnorm_alpha=args.gradnorm_alpha,
        gradnorm_update_freq=args.gradnorm_update_freq,
    )

    # Each run gets its own subdirectory under --output_dir.
    output_dir = os.path.join(args.output_dir, config.experiment_name)

    print(f"\n{'='*50}")
    print(f"Training: {config.model}")
    print(f"  Pretrained weights: {pretrained_weights}")
    print(f"  Single-target strategy: {single_target_strategy}")
    print(f"  Learning rate: {args.learning_rate}")
    print(f"  Batch size: {args.batch_size}")
    print(f"  Epochs: {args.epochs}")
    print(f"  Seed: {args.seed}")
    print(f"  Deterministic: {args.deterministic}")
    print(f"  Output dir: {output_dir}")
    print(f"{'='*50}\n")

    train_model(
        config=config,
        data_dir=args.data_dir,
        output_dir=output_dir,
        biomarker_config=biomarker_config,
        epochs=args.epochs,
        seed=args.seed,
    )
920
+
921
+
922
+ if __name__ == "__main__":
923
+ main()
code/utils/checkpoints.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import logging
4
+ import torch
5
+
6
+ # Functions in this file are inspired by the following:
7
+ # https://github.com/cs230-stanford/cs230-code-examples/blob/master/pytorch/vision/utils.py
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
def save_checkpoint(state, model_state, isbest, checkpoint):
    """Save training and model state to a checkpoint directory.

    Args:
        state: training-state dict (epoch counters, optimizer state, ...);
            written to ``<checkpoint>/last.pth``.
        model_state: model ``state_dict``; written to
            ``<checkpoint>/model_last.pth``.
        isbest: when True, both files are additionally copied to
            ``best.pth`` / ``model_best.pth`` in the same directory.
        checkpoint: target directory; created if it does not exist.
    """
    if not os.path.isdir(checkpoint):
        logger.info("Checkpoint directory does not exist. Creating %s", checkpoint)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() sequence (safe under concurrent runs).
    os.makedirs(checkpoint, exist_ok=True)

    filepath = os.path.join(checkpoint, 'last.pth')
    model_filepath = os.path.join(checkpoint, 'model_last.pth')
    torch.save(state, filepath)
    torch.save(model_state, model_filepath)

    if isbest:
        logger.info("Saving best checkpoint copy")
        shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth'))
        shutil.copyfile(model_filepath, os.path.join(checkpoint, 'model_best.pth'))
28
+
29
+
30
def load_checkpoint(checkpoint, model, optimizer=None):
    """
    Load checkpoint file into model (and optimizer if provided).

    The key remapping logic below is kept for compatibility with older
    checkpoint formats used during project development.

    Args:
        checkpoint: path to a file produced by ``torch.save``; the saved
            object must be a dict with a ``'state_dict'`` entry (and an
            ``'optim_dict'`` entry when ``optimizer`` is given).
        model: module whose parameters are overwritten in place via
            ``load_state_dict``.
        optimizer: optional optimizer whose state is restored from
            ``'optim_dict'``.

    Returns:
        The full loaded checkpoint dict (the ``checkpoint`` name is rebound
        from the path string to the loaded dict below).

    Raises:
        IOError: if ``checkpoint`` does not name an existing path.
    """
    if not os.path.exists(checkpoint):
        raise IOError("File doesn't exist {}".format(checkpoint))

    # NOTE: `checkpoint` is rebound here from a path string to the loaded dict.
    if torch.cuda.is_available():
        checkpoint = torch.load(checkpoint)
    else:
        # No GPU available: remap any CUDA-saved tensors onto the CPU.
        checkpoint = torch.load(checkpoint, map_location='cpu')

    # Legacy-format key rewriting:
    #   - keys containing 'layers.0.': drop the 'module.' (DataParallel)
    #     prefix and splice the name back together without the '0.' segment.
    #   - keys containing 'layers.1.': replace '1' with '8' and drop the
    #     'module.' prefix (legacy layer-index shift).
    #   - other 'module.'-prefixed keys: just drop the prefix.
    #   - everything else is copied through unchanged.
    # NOTE(review): the substring splits/replaces match anywhere in the key,
    # not only the layer index (e.g. replace('1', '8') hits every '1') —
    # presumably safe for the specific legacy checkpoints this targets;
    # confirm before reusing on other formats.
    state_dict = {}
    for key in checkpoint['state_dict'].keys():
        if 'layers.0.' in key:
            state_dict[key.split('0.')[0].split('module.')[1] + key.split('0.')[1]] = checkpoint['state_dict'][key]
        elif 'layers.1.' in key:
            state_dict[key.replace('1', '8').split('module.')[1]] = checkpoint['state_dict'][key]
        elif 'module.' in key:
            state_dict[key.split('module.')[1]] = checkpoint['state_dict'][key]
        else:
            state_dict[key] = checkpoint['state_dict'][key]
    model.load_state_dict(state_dict)

    if optimizer:
        optimizer.load_state_dict(checkpoint['optim_dict'])

    return checkpoint
61
+
code/utils/labels.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import IntEnum
2
+
3
class Condition(IntEnum):
    """Binary presence label for a medical condition (0 = absent, 1 = present)."""

    ABSENT = 0
    PRESENT = 1

    @staticmethod
    def convert(s):
        """Map a case-insensitive 'absent'/'present' string to a Condition.

        Raises:
            ValueError: if the value is not a recognized condition label.
        """
        name = str(s).upper()
        try:
            # Enum name lookup covers exactly the ABSENT/PRESENT members.
            return Condition[name]
        except KeyError:
            raise ValueError(f"Unsupported condition label: {s}") from None
15
+