Aakash-Tripathi commited on
Commit
cf14762
·
verified ·
1 Parent(s): 281583f

Upload 20 files

Browse files
.gitattributes ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ checkpoints/28a7cd44f5bcd3e6cc760b65c7e0d54d.ckpt filter=lfs diff=lfs merge=lfs -text
2
+ checkpoints/56ce1a7d241dc342982f5466c4a9d7ef.ckpt filter=lfs diff=lfs merge=lfs -text
3
+ checkpoints/624407ef8e3a2a009f9fa51f9846fe9a.ckpt filter=lfs diff=lfs merge=lfs -text
4
+ checkpoints/64a91b25f84141d32852e75a3aec7305.ckpt filter=lfs diff=lfs merge=lfs -text
5
+ checkpoints/65fd1f04cb4c5847d86a9ed8ba31ac1a.ckpt filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Original Sybil Model:
4
+ Copyright (c) 2022 Peter Mikhael & Jeremy Wohlwend
5
+
6
+ Hugging Face Adaptation:
7
+ Copyright (c) 2025 Aakash Tripathi
8
+
9
+ Permission is hereby granted, free of charge, to any person obtaining a copy
10
+ of this software and associated documentation files (the "Software"), to deal
11
+ in the Software without restriction, including without limitation the rights
12
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13
+ copies of the Software, and to permit persons to whom the Software is
14
+ furnished to do so, subject to the following conditions:
15
+
16
+ The above copyright notice and this permission notice shall be included in all
17
+ copies or substantial portions of the Software.
18
+
19
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - medical
5
+ - cancer
6
+ - ct-scan
7
+ - risk-prediction
8
+ - healthcare
9
+ - pytorch
10
+ - vision
11
+ datasets:
12
+ - NLST
13
+ metrics:
14
+ - auc
15
+ - c-index
16
+ language:
17
+ - en
18
+ library_name: transformers
19
+ pipeline_tag: image-classification
20
+ ---
21
+
22
+ # Sybil - Lung Cancer Risk Prediction
23
+
24
+ ## 🎯 Model Description
25
+
26
+ Sybil is a validated deep learning model that predicts future lung cancer risk from a single low-dose chest CT (LDCT) scan. Published in the Journal of Clinical Oncology, this model can assess cancer risk over a 1-6 year timeframe.
27
+
28
+ ### Key Features
29
+ - **Single Scan Analysis**: Requires only one LDCT scan
30
+ - **Multi-Year Prediction**: Provides risk scores for years 1-6
31
+ - **Validated Performance**: Tested across multiple institutions globally
32
+ - **Ensemble Approach**: Uses 5 models for robust predictions
33
+
34
+ ## 🚀 Quick Start
35
+
36
+ ### Installation
37
+
38
+ ```bash
39
+ pip install huggingface-hub torch torchvision pydicom
40
+ ```
41
+
42
+ ### Basic Usage
43
+
44
+ ```python
45
+ from huggingface_hub import snapshot_download
46
+ import sys
47
+
48
+ # Download model
49
+ model_path = snapshot_download(repo_id="Lab-Rasool/sybil")
50
+ sys.path.append(model_path)
51
+
52
+ # Import model
53
+ from modeling_sybil_wrapper import SybilHFWrapper
54
+ from configuration_sybil import SybilConfig
55
+
56
+ # Initialize
57
+ config = SybilConfig()
58
+ model = SybilHFWrapper(config)
59
+
60
+ # Prepare your DICOM files (CT scan slices)
61
+ dicom_paths = ["scan1.dcm", "scan2.dcm", ...] # Replace with actual paths
62
+
63
+ # Get predictions
64
+ output = model(dicom_paths=dicom_paths)
65
+ risk_scores = output.risk_scores.numpy()
66
+
67
+ # Display results
68
+ print("Lung Cancer Risk Predictions:")
69
+ for i, score in enumerate(risk_scores):
70
+ print(f"Year {i+1}: {score*100:.1f}%")
71
+ ```
72
+
73
+ ## 📊 Example with Demo Data
74
+
75
+ ```python
76
+ import requests
77
+ import zipfile
78
+ from io import BytesIO
79
+ import os
80
+
81
+ # Download demo DICOM files
82
+ def get_demo_data():
83
+ cache_dir = os.path.expanduser("~/.sybil_demo")
84
+ demo_dir = os.path.join(cache_dir, "sybil_demo_data")
85
+
86
+ if not os.path.exists(demo_dir):
87
+ print("Downloading demo data...")
88
+ url = "https://www.dropbox.com/scl/fi/covbvo6f547kak4em3cjd/sybil_example.zip?rlkey=7a13nhlc9uwga9x7pmtk1cf1c&dl=1"
89
+ response = requests.get(url)
90
+
91
+ os.makedirs(cache_dir, exist_ok=True)
92
+ with zipfile.ZipFile(BytesIO(response.content)) as zf:
93
+ zf.extractall(cache_dir)
94
+
95
+ # Find DICOM files
96
+ dicom_files = []
97
+ for root, dirs, files in os.walk(cache_dir):
98
+ for file in files:
99
+ if file.endswith('.dcm'):
100
+ dicom_files.append(os.path.join(root, file))
101
+
102
+ return sorted(dicom_files)
103
+
104
+ # Run demo
105
+ from huggingface_hub import snapshot_download
106
+ import sys
107
+
108
+ # Load model
109
+ model_path = snapshot_download(repo_id="Lab-Rasool/sybil")
110
+ sys.path.append(model_path)
111
+
112
+ from modeling_sybil_wrapper import SybilHFWrapper
113
+ from configuration_sybil import SybilConfig
114
+
115
+ # Initialize and predict
116
+ config = SybilConfig()
117
+ model = SybilHFWrapper(config)
118
+
119
+ dicom_files = get_demo_data()
120
+ output = model(dicom_paths=dicom_files)
121
+
122
+ # Show results
123
+ for i, score in enumerate(output.risk_scores.numpy()):
124
+ print(f"Year {i+1}: {score*100:.1f}% risk")
125
+ ```
126
+
127
+ Expected output for demo data:
128
+ ```
129
+ Year 1: 2.2% risk
130
+ Year 2: 4.5% risk
131
+ Year 3: 7.2% risk
132
+ Year 4: 7.9% risk
133
+ Year 5: 9.6% risk
134
+ Year 6: 13.6% risk
135
+ ```
136
+
137
+ ## 📈 Performance Metrics
138
+
139
+ | Dataset | 1-Year AUC | 6-Year AUC | Sample Size |
140
+ |---------|------------|------------|-------------|
141
+ | NLST Test | 0.94 | 0.86 | ~15,000 |
142
+ | MGH | 0.86 | 0.75 | ~12,000 |
143
+ | CGMH Taiwan | 0.94 | 0.80 | ~8,000 |
144
+
145
+ ## 🏥 Intended Use
146
+
147
+ ### Primary Use Cases
148
+ - Risk stratification in lung cancer screening programs
149
+ - Research on lung cancer prediction models
150
+ - Clinical decision support (with appropriate oversight)
151
+
152
+ ### Users
153
+ - Healthcare providers
154
+ - Medical researchers
155
+ - Screening program coordinators
156
+
157
+ ### Out of Scope
158
+ - ❌ Diagnosis of existing cancer
159
+ - ❌ Use with non-LDCT imaging (X-rays, MRI)
160
+ - ❌ Sole basis for clinical decisions
161
+ - ❌ Use outside medical supervision
162
+
163
+ ## 📋 Input Requirements
164
+
165
+ - **Format**: DICOM files from chest CT scan
166
+ - **Type**: Low-dose CT (LDCT)
167
+ - **Orientation**: Axial view
168
+ - **Order**: Anatomically ordered (abdomen → clavicles)
169
+ - **Number of slices**: Typically 100-300 slices
170
+ - **Resolution**: Automatically handled by model
171
+
172
+ ## ⚠️ Important Considerations
173
+
174
+ ### Medical AI Notice
175
+ This model should **supplement, not replace**, clinical judgment. Always consider:
176
+ - Complete patient medical history
177
+ - Additional risk factors (smoking, family history)
178
+ - Current clinical guidelines
179
+ - Need for professional medical oversight
180
+
181
+ ### Limitations
182
+ - Optimized for screening population (ages 55-80)
183
+ - Best performance with LDCT scans
184
+ - Not validated for pediatric use
185
+ - Performance may vary with different scanner manufacturers
186
+
187
+ ## 📚 Citation
188
+
189
+ If you use this model, please cite the original paper:
190
+
191
+ ```bibtex
192
+ @article{mikhael2023sybil,
193
+ title={Sybil: a validated deep learning model to predict future lung cancer risk from a single low-dose chest computed tomography},
194
+ author={Mikhael, Peter G and Wohlwend, Jeremy and Yala, Adam and others},
195
+ journal={Journal of Clinical Oncology},
196
+ volume={41},
197
+ number={12},
198
+ pages={2191--2200},
199
+ year={2023},
200
+ publisher={American Society of Clinical Oncology}
201
+ }
202
+ ```
203
+
204
+ ## 🙏 Acknowledgments
205
+
206
+ This Hugging Face implementation is based on the original work by:
207
+ - **Original Authors**: Peter G. Mikhael & Jeremy Wohlwend
208
+ - **Institutions**: MIT CSAIL & Massachusetts General Hospital
209
+ - **Original Repository**: [GitHub](https://github.com/reginabarzilaygroup/Sybil)
210
+ - **Paper**: [Journal of Clinical Oncology](https://doi.org/10.1200/JCO.22.01345)
211
+
212
+ ## 📄 License
213
+
214
+ MIT License - See [LICENSE](LICENSE) file
215
+
216
+ - Original Model © 2022 Peter Mikhael & Jeremy Wohlwend
217
+ - HF Adaptation © 2025 Aakash Tripathi
218
+
219
+ ## 🔧 Troubleshooting
220
+
221
+ ### Common Issues
222
+
223
+ 1. **Import Error**: Make sure to append model path to sys.path
224
+ ```python
225
+ sys.path.append(model_path)
226
+ ```
227
+
228
+ 2. **Missing Dependencies**: Install all requirements
229
+ ```bash
230
+ pip install torch torchvision pydicom sybil huggingface-hub
231
+ ```
232
+
233
+ 3. **DICOM Loading Error**: Ensure DICOM files are valid CT scans
234
+ ```python
235
+ import pydicom
236
+ dcm = pydicom.dcmread("your_file.dcm") # Test single file
237
+ ```
238
+
239
+ 4. **Memory Issues**: Model requires ~4GB GPU memory
240
+ ```python
241
+ import torch
242
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
243
+ ```
244
+
245
+ ## 📬 Support
246
+
247
+ - **HF Model Issues**: Open issue on this repository
248
+ - **Original Model**: [GitHub Issues](https://github.com/reginabarzilaygroup/Sybil/issues)
249
+ - **Medical Questions**: Consult healthcare professionals
250
+
251
+ ## 🔍 Additional Resources
252
+
253
+ - [Original GitHub Repository](https://github.com/reginabarzilaygroup/Sybil)
254
+ - [Paper (Open Access)](https://doi.org/10.1200/JCO.22.01345)
255
+ - [NLST Dataset Information](https://cdas.cancer.gov/nlst/)
256
+ - [Demo Data](https://github.com/reginabarzilaygroup/Sybil/releases)
257
+
258
+ ---
259
+
260
+ **Note**: This is a research model. Always consult qualified healthcare professionals for medical decisions.
__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Hugging Face Sybil model for lung cancer risk prediction"""
2
+
3
+ from .configuration_sybil import SybilConfig
4
+ from .modeling_sybil import (
5
+ SybilForRiskPrediction,
6
+ SybilPreTrainedModel,
7
+ SybilOutput,
8
+ SybilEnsemble,
9
+ )
10
+ from .image_processing_sybil import SybilImageProcessor
11
+
12
+ __version__ = "1.0.0"
13
+
14
+ __all__ = [
15
+ "SybilConfig",
16
+ "SybilForRiskPrediction",
17
+ "SybilPreTrainedModel",
18
+ "SybilOutput",
19
+ "SybilEnsemble",
20
+ "SybilImageProcessor",
21
+ ]
config.json ADDED
@@ -0,0 +1,2018 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "calibrator_data": {
3
+ "Year1": [
4
+ {
5
+ "coef": [
6
+ [
7
+ 0.854556657547542
8
+ ]
9
+ ],
10
+ "intercept": [
11
+ -1.1545482239584481
12
+ ],
13
+ "x0": [
14
+ -1.1360357729796648,
15
+ -1.1161314190688554,
16
+ -1.1160988999061974,
17
+ -1.073612876529882,
18
+ -1.0735402933768126,
19
+ -1.0280575376367502,
20
+ -1.0278736221139817,
21
+ -0.9560547696464696,
22
+ -0.9534790354878826,
23
+ -0.8734646409329012,
24
+ -0.872773038089129,
25
+ -0.7986212739447631,
26
+ -0.7978832178830266,
27
+ -0.7657858779493016,
28
+ -0.7617341080705604,
29
+ -0.7110751930073937,
30
+ -0.7104674300723599,
31
+ -0.6202510021996833,
32
+ -0.6177653984897673,
33
+ -0.5117729076924928,
34
+ -0.49671799022835506,
35
+ -0.40163472186962823,
36
+ -0.37576231688531325,
37
+ -0.36935111064413173
38
+ ],
39
+ "x_max": -0.36935111064413173,
40
+ "x_min": -1.1360357729796648,
41
+ "y0": [
42
+ 0.0,
43
+ 0.0,
44
+ 0.003745318352059925,
45
+ 0.003745318352059925,
46
+ 0.004878048780487805,
47
+ 0.004878048780487805,
48
+ 0.007194244604316547,
49
+ 0.007194244604316547,
50
+ 0.012987012987012988,
51
+ 0.012987012987012988,
52
+ 0.045454545454545456,
53
+ 0.045454545454545456,
54
+ 0.0625,
55
+ 0.0625,
56
+ 0.1,
57
+ 0.1,
58
+ 0.125,
59
+ 0.125,
60
+ 0.23529411764705882,
61
+ 0.23529411764705882,
62
+ 0.375,
63
+ 0.375,
64
+ 1.0,
65
+ 1.0
66
+ ]
67
+ },
68
+ {
69
+ "coef": [
70
+ [
71
+ 0.8610539752599486
72
+ ]
73
+ ],
74
+ "intercept": [
75
+ -1.1567984290115039
76
+ ],
77
+ "x0": [
78
+ -1.1378705470845625,
79
+ -1.106519323099567,
80
+ -1.1065117337381025,
81
+ -1.0733190326687234,
82
+ -1.0731443298019434,
83
+ -0.941562854808599,
84
+ -0.9414871023316992,
85
+ -0.8306046195608187,
86
+ -0.8293963521574212,
87
+ -0.8006963509914216,
88
+ -0.8005737921060307,
89
+ -0.6439063788575624,
90
+ -0.6379002236311682,
91
+ -0.5359935368850415,
92
+ -0.5292345785887509,
93
+ -0.4602247393296057,
94
+ -0.44611609712109157,
95
+ -0.38348501673488056,
96
+ -0.36534548053198035,
97
+ -0.36046565583271617
98
+ ],
99
+ "x_max": -0.36046565583271617,
100
+ "x_min": -1.1378705470845625,
101
+ "y0": [
102
+ 0.0,
103
+ 0.0,
104
+ 0.003067484662576687,
105
+ 0.003067484662576687,
106
+ 0.006230529595015576,
107
+ 0.006230529595015576,
108
+ 0.03571428571428571,
109
+ 0.03571428571428571,
110
+ 0.08333333333333333,
111
+ 0.08333333333333333,
112
+ 0.09803921568627451,
113
+ 0.09803921568627451,
114
+ 0.14285714285714285,
115
+ 0.14285714285714285,
116
+ 0.25,
117
+ 0.25,
118
+ 0.5,
119
+ 0.5,
120
+ 1.0,
121
+ 1.0
122
+ ]
123
+ },
124
+ {
125
+ "coef": [
126
+ [
127
+ 0.931170651053989
128
+ ]
129
+ ],
130
+ "intercept": [
131
+ -1.1777126799892834
132
+ ],
133
+ "x0": [
134
+ -1.1551989104135245,
135
+ -1.1404158612667903,
136
+ -1.1403709080380073,
137
+ -1.1082794886641045,
138
+ -1.108175942566476,
139
+ -1.0433254136052175,
140
+ -1.042702707840477,
141
+ -1.0140383035344585,
142
+ -1.0130984171672797,
143
+ -0.7646651111536567,
144
+ -0.7600911556810273,
145
+ -0.7312889535464594,
146
+ -0.7246139107321459,
147
+ -0.6148745839835493,
148
+ -0.6138490162558539,
149
+ -0.3805688230821349,
150
+ -0.3740350608528874,
151
+ -0.34233082063097187,
152
+ -0.32275895005404653
153
+ ],
154
+ "x_max": -0.32275895005404653,
155
+ "x_min": -1.1551989104135245,
156
+ "y0": [
157
+ 0.0,
158
+ 0.0,
159
+ 0.002105263157894737,
160
+ 0.002105263157894737,
161
+ 0.0030120481927710845,
162
+ 0.0030120481927710845,
163
+ 0.016666666666666666,
164
+ 0.016666666666666666,
165
+ 0.016853932584269662,
166
+ 0.016853932584269662,
167
+ 0.06666666666666667,
168
+ 0.06666666666666667,
169
+ 0.12903225806451613,
170
+ 0.12903225806451613,
171
+ 0.23333333333333334,
172
+ 0.23333333333333334,
173
+ 0.3333333333333333,
174
+ 0.3333333333333333,
175
+ 1.0
176
+ ]
177
+ },
178
+ {
179
+ "coef": [
180
+ [
181
+ 0.802573263743587
182
+ ]
183
+ ],
184
+ "intercept": [
185
+ -1.133514632876246
186
+ ],
187
+ "x0": [
188
+ -1.1145748000846425,
189
+ -1.001099080307295,
190
+ -1.0006650304322395,
191
+ -0.7822120240879247,
192
+ -0.7809762245125411,
193
+ -0.7602535779621472,
194
+ -0.7599881299259228,
195
+ -0.7228992892466355,
196
+ -0.7204894477845987,
197
+ -0.6758539945396886,
198
+ -0.6733669918445594,
199
+ -0.6239612713939791,
200
+ -0.618395903844087,
201
+ -0.5797944303247502,
202
+ -0.5795030067463254,
203
+ -0.4308790865818225,
204
+ -0.42913485044975996,
205
+ -0.4127336156492978
206
+ ],
207
+ "x_max": -0.4127336156492978,
208
+ "x_min": -1.1145748000846425,
209
+ "y0": [
210
+ 0.0,
211
+ 0.0,
212
+ 0.018779342723004695,
213
+ 0.018779342723004695,
214
+ 0.06666666666666667,
215
+ 0.06666666666666667,
216
+ 0.08333333333333333,
217
+ 0.08333333333333333,
218
+ 0.125,
219
+ 0.125,
220
+ 0.14285714285714285,
221
+ 0.14285714285714285,
222
+ 0.3333333333333333,
223
+ 0.3333333333333333,
224
+ 0.4,
225
+ 0.4,
226
+ 0.5,
227
+ 0.5
228
+ ]
229
+ },
230
+ {
231
+ "coef": [
232
+ [
233
+ 0.7571081357285535
234
+ ]
235
+ ],
236
+ "intercept": [
237
+ -1.1240990711329706
238
+ ],
239
+ "x0": [
240
+ -1.1080936473109704,
241
+ -1.0478081580528156,
242
+ -1.0477693656166227,
243
+ -0.906516611811425,
244
+ -0.9051812314122925,
245
+ -0.8537131170836589,
246
+ -0.8530645720823533,
247
+ -0.809823116729157,
248
+ -0.8059229792340918,
249
+ -0.775062431397729,
250
+ -0.7727583513501239,
251
+ -0.6678702375770744,
252
+ -0.6576353522247389,
253
+ -0.6414058198676769,
254
+ -0.6391149846419681,
255
+ -0.5392171226404363,
256
+ -0.5379400690975253,
257
+ -0.43595259388335383,
258
+ -0.4059231304165102,
259
+ -0.40304212217287894
260
+ ],
261
+ "x_max": -0.40304212217287894,
262
+ "x_min": -1.1080936473109704,
263
+ "y0": [
264
+ 0.0,
265
+ 0.0,
266
+ 0.005633802816901409,
267
+ 0.005633802816901409,
268
+ 0.023809523809523808,
269
+ 0.023809523809523808,
270
+ 0.05263157894736842,
271
+ 0.05263157894736842,
272
+ 0.09090909090909091,
273
+ 0.09090909090909091,
274
+ 0.17857142857142858,
275
+ 0.17857142857142858,
276
+ 0.2,
277
+ 0.2,
278
+ 0.3333333333333333,
279
+ 0.3333333333333333,
280
+ 0.5,
281
+ 0.5,
282
+ 1.0,
283
+ 1.0
284
+ ]
285
+ }
286
+ ],
287
+ "Year2": [
288
+ {
289
+ "coef": [
290
+ [
291
+ 1.062266673654182
292
+ ]
293
+ ],
294
+ "intercept": [
295
+ -1.1402943638341543
296
+ ],
297
+ "x0": [
298
+ -1.1132237228060184,
299
+ -1.0984032927059206,
300
+ -1.0981069262090772,
301
+ -1.081592725093658,
302
+ -1.0814958159559502,
303
+ -1.033709096648227,
304
+ -1.0337017915615265,
305
+ -0.9433254512205224,
306
+ -0.9432834568651239,
307
+ -0.8978076538523547,
308
+ -0.8964592757256059,
309
+ -0.7908901199818554,
310
+ -0.7900304149572032,
311
+ -0.7597781118771237,
312
+ -0.7575168749204351,
313
+ -0.6978551950375774,
314
+ -0.6969377457956264,
315
+ -0.6754802605747152,
316
+ -0.6743523171985429,
317
+ -0.5982205872578594,
318
+ -0.5978797570805355,
319
+ -0.20437640643562294,
320
+ -0.17221541004131058,
321
+ -0.1642458849457179
322
+ ],
323
+ "x_max": -0.1642458849457179,
324
+ "x_min": -1.1132237228060184,
325
+ "y0": [
326
+ 0.0,
327
+ 0.0,
328
+ 0.004166666666666667,
329
+ 0.004166666666666667,
330
+ 0.00646551724137931,
331
+ 0.00646551724137931,
332
+ 0.022950819672131147,
333
+ 0.022950819672131147,
334
+ 0.029850746268656716,
335
+ 0.029850746268656716,
336
+ 0.030927835051546393,
337
+ 0.030927835051546393,
338
+ 0.05555555555555555,
339
+ 0.05555555555555555,
340
+ 0.08,
341
+ 0.08,
342
+ 0.1,
343
+ 0.1,
344
+ 0.17391304347826086,
345
+ 0.17391304347826086,
346
+ 0.21875,
347
+ 0.21875,
348
+ 1.0,
349
+ 1.0
350
+ ]
351
+ },
352
+ {
353
+ "coef": [
354
+ [
355
+ 0.9452799797637848
356
+ ]
357
+ ],
358
+ "intercept": [
359
+ -1.1056176033444525
360
+ ],
361
+ "x0": [
362
+ -1.0812889671248251,
363
+ -1.0466438113101284,
364
+ -1.0466141854157391,
365
+ -1.0190228994163042,
366
+ -1.0190087854754137,
367
+ -0.8633880083865842,
368
+ -0.8629823664007918,
369
+ -0.8348653114530901,
370
+ -0.8347858113708686,
371
+ -0.8020762603552521,
372
+ -0.7996074475609661,
373
+ -0.7839658204696691,
374
+ -0.7835622350062019,
375
+ -0.7475163850050008,
376
+ -0.7461899281051441,
377
+ -0.5425558200873033,
378
+ -0.5359621587675816,
379
+ -0.4767176382463163,
380
+ -0.4681188958309078,
381
+ -0.4115751243820749,
382
+ -0.37794935041320266,
383
+ -0.25666073454046767,
384
+ -0.2367468372616991,
385
+ -0.2313896811187538
386
+ ],
387
+ "x_max": -0.2313896811187538,
388
+ "x_min": -1.0812889671248251,
389
+ "y0": [
390
+ 0.0,
391
+ 0.0,
392
+ 0.006578947368421052,
393
+ 0.006578947368421052,
394
+ 0.011876484560570071,
395
+ 0.011876484560570071,
396
+ 0.04,
397
+ 0.04,
398
+ 0.043478260869565216,
399
+ 0.043478260869565216,
400
+ 0.07692307692307693,
401
+ 0.07692307692307693,
402
+ 0.1,
403
+ 0.1,
404
+ 0.11290322580645161,
405
+ 0.11290322580645161,
406
+ 0.23076923076923078,
407
+ 0.23076923076923078,
408
+ 0.42857142857142855,
409
+ 0.42857142857142855,
410
+ 0.6666666666666666,
411
+ 0.6666666666666666,
412
+ 1.0,
413
+ 1.0
414
+ ]
415
+ },
416
+ {
417
+ "coef": [
418
+ [
419
+ 0.977569119429414
420
+ ]
421
+ ],
422
+ "intercept": [
423
+ -1.1131720692602696
424
+ ],
425
+ "x0": [
426
+ -1.0845931804016375,
427
+ -1.065066758858955,
428
+ -1.0649363886111944,
429
+ -1.0336518336720375,
430
+ -1.033579472521642,
431
+ -1.0014803065603592,
432
+ -1.0011882908982757,
433
+ -0.6795431230600604,
434
+ -0.674741256056814,
435
+ -0.6518493782261137,
436
+ -0.64987028801675,
437
+ -0.5222888178703454,
438
+ -0.5212121480468886,
439
+ -0.3052231965882647,
440
+ -0.29144341938190454,
441
+ -0.23616471543543072,
442
+ -0.21561761564892545
443
+ ],
444
+ "x_max": -0.21561761564892545,
445
+ "x_min": -1.0845931804016375,
446
+ "y0": [
447
+ 0.0,
448
+ 0.0,
449
+ 0.002369668246445498,
450
+ 0.002369668246445498,
451
+ 0.004629629629629629,
452
+ 0.004629629629629629,
453
+ 0.02570694087403599,
454
+ 0.02570694087403599,
455
+ 0.1,
456
+ 0.1,
457
+ 0.14285714285714285,
458
+ 0.14285714285714285,
459
+ 0.38461538461538464,
460
+ 0.38461538461538464,
461
+ 0.6666666666666666,
462
+ 0.6666666666666666,
463
+ 1.0
464
+ ]
465
+ },
466
+ {
467
+ "coef": [
468
+ [
469
+ 0.9331653645301372
470
+ ]
471
+ ],
472
+ "intercept": [
473
+ -1.1044707886310574
474
+ ],
475
+ "x0": [
476
+ -1.0774566054058767,
477
+ -1.0420772338529536,
478
+ -1.0419548398643057,
479
+ -0.9493604640979585,
480
+ -0.9491299707151104,
481
+ -0.9365174523436794,
482
+ -0.9364732614670692,
483
+ -0.7179492653874966,
484
+ -0.7154435675953599,
485
+ -0.6731615199328296,
486
+ -0.67142962135454,
487
+ -0.6270414570925323,
488
+ -0.6242394940967904,
489
+ -0.5120045680430358,
490
+ -0.5055336220583483,
491
+ -0.33179717675997744,
492
+ -0.328290718304015,
493
+ -0.2875046806008894,
494
+ -0.28547662806097474,
495
+ -0.2664066378897004
496
+ ],
497
+ "x_max": -0.2664066378897004,
498
+ "x_min": -1.0774566054058767,
499
+ "y0": [
500
+ 0.0,
501
+ 0.0,
502
+ 0.007054673721340388,
503
+ 0.007054673721340388,
504
+ 0.029411764705882353,
505
+ 0.029411764705882353,
506
+ 0.041025641025641026,
507
+ 0.041025641025641026,
508
+ 0.1,
509
+ 0.1,
510
+ 0.14285714285714285,
511
+ 0.14285714285714285,
512
+ 0.26666666666666666,
513
+ 0.26666666666666666,
514
+ 0.42105263157894735,
515
+ 0.42105263157894735,
516
+ 0.6666666666666666,
517
+ 0.6666666666666666,
518
+ 0.75,
519
+ 0.75
520
+ ]
521
+ },
522
+ {
523
+ "coef": [
524
+ [
525
+ 0.8961901274568367
526
+ ]
527
+ ],
528
+ "intercept": [
529
+ -1.0982403142894093
530
+ ],
531
+ "x0": [
532
+ -1.0758350750074313,
533
+ -1.0632542041947801,
534
+ -1.0631388433027222,
535
+ -1.0490756815117528,
536
+ -1.0490719723622743,
537
+ -1.004288045432703,
538
+ -1.0039359266248848,
539
+ -0.8597483194001383,
540
+ -0.857685938810237,
541
+ -0.7810773324119306,
542
+ -0.7788166141512711,
543
+ -0.7262313576523969,
544
+ -0.7216147585777637,
545
+ -0.6730376804506044,
546
+ -0.6717071674683137,
547
+ -0.5268755303702289,
548
+ -0.5241638650003476,
549
+ -0.40591460382490485,
550
+ -0.4044029534761878,
551
+ -0.28368021375389385,
552
+ -0.24813428917349833,
553
+ -0.24472403504569196
554
+ ],
555
+ "x_max": -0.24472403504569196,
556
+ "x_min": -1.0758350750074313,
557
+ "y0": [
558
+ 0.0,
559
+ 0.0,
560
+ 0.003703703703703704,
561
+ 0.003703703703703704,
562
+ 0.003937007874015748,
563
+ 0.003937007874015748,
564
+ 0.01078167115902965,
565
+ 0.01078167115902965,
566
+ 0.05172413793103448,
567
+ 0.05172413793103448,
568
+ 0.08695652173913043,
569
+ 0.08695652173913043,
570
+ 0.23076923076923078,
571
+ 0.23076923076923078,
572
+ 0.26666666666666666,
573
+ 0.26666666666666666,
574
+ 0.4444444444444444,
575
+ 0.4444444444444444,
576
+ 0.625,
577
+ 0.625,
578
+ 1.0,
579
+ 1.0
580
+ ]
581
+ }
582
+ ],
583
+ "Year3": [
584
+ {
585
+ "coef": [
586
+ [
587
+ 1.072854890710045
588
+ ]
589
+ ],
590
+ "intercept": [
591
+ -1.0975483040639067
592
+ ],
593
+ "x0": [
594
+ -1.0668657274687154,
595
+ -1.055840060430324,
596
+ -1.0556273522769775,
597
+ -1.036306556853323,
598
+ -1.0361733269949625,
599
+ -1.0144346304595142,
600
+ -1.0142514059319403,
601
+ -1.0076178099514084,
602
+ -1.0076047887161141,
603
+ -0.9745636399616542,
604
+ -0.9744364251306473,
605
+ -0.893484508393487,
606
+ -0.8934749962572068,
607
+ -0.8126541951262676,
608
+ -0.8108757933379082,
609
+ -0.7132392298627734,
610
+ -0.7109554538692253,
611
+ -0.6506990920367363,
612
+ -0.6497724980555664,
613
+ -0.628972765275148,
614
+ -0.6269619476258803,
615
+ -0.5500713693644852,
616
+ -0.5497271419386303,
617
+ -0.3569641318650252,
618
+ -0.352800853663226,
619
+ -0.2250071731934148,
620
+ -0.21752555028003018,
621
+ -0.15230151923890023,
622
+ -0.11981995587303407,
623
+ -0.11177099398097867
624
+ ],
625
+ "x_max": -0.11177099398097867,
626
+ "x_min": -1.0668657274687154,
627
+ "y0": [
628
+ 0.0,
629
+ 0.0,
630
+ 0.008032128514056224,
631
+ 0.008032128514056224,
632
+ 0.014598540145985401,
633
+ 0.014598540145985401,
634
+ 0.015873015873015872,
635
+ 0.015873015873015872,
636
+ 0.01694915254237288,
637
+ 0.01694915254237288,
638
+ 0.03463203463203463,
639
+ 0.03463203463203463,
640
+ 0.04424778761061947,
641
+ 0.04424778761061947,
642
+ 0.06557377049180328,
643
+ 0.06557377049180328,
644
+ 0.08,
645
+ 0.08,
646
+ 0.1111111111111111,
647
+ 0.1111111111111111,
648
+ 0.17391304347826086,
649
+ 0.17391304347826086,
650
+ 0.2222222222222222,
651
+ 0.2222222222222222,
652
+ 0.25,
653
+ 0.25,
654
+ 0.4,
655
+ 0.4,
656
+ 1.0,
657
+ 1.0
658
+ ]
659
+ },
660
+ {
661
+ "coef": [
662
+ [
663
+ 0.9914570976903353
664
+ ]
665
+ ],
666
+ "intercept": [
667
+ -1.0790425515518447
668
+ ],
669
+ "x0": [
670
+ -1.0509692533832513,
671
+ -1.0306651082008873,
672
+ -1.030658061068699,
673
+ -1.0208099967000621,
674
+ -1.020741246533108,
675
+ -0.985112419883033,
676
+ -0.9850553410670821,
677
+ -0.975147553361191,
678
+ -0.974998182229142,
679
+ -0.8779299862351682,
680
+ -0.8776659773207016,
681
+ -0.8460688378948997,
682
+ -0.845974211308601,
683
+ -0.741678014120675,
684
+ -0.7412547134258067,
685
+ -0.48847508646284665,
686
+ -0.4815593234559564,
687
+ -0.4194206961418063,
688
+ -0.4104019034227454,
689
+ -0.35109595731867727,
690
+ -0.3158275575001387,
691
+ -0.1886139682457576,
692
+ -0.16210841876048143
693
+ ],
694
+ "x_max": -0.16210841876048143,
695
+ "x_min": -1.0509692533832513,
696
+ "y0": [
697
+ 0.0,
698
+ 0.0,
699
+ 0.005405405405405406,
700
+ 0.005405405405405406,
701
+ 0.007281553398058253,
702
+ 0.007281553398058253,
703
+ 0.014925373134328358,
704
+ 0.014925373134328358,
705
+ 0.020338983050847456,
706
+ 0.020338983050847456,
707
+ 0.04081632653061224,
708
+ 0.04081632653061224,
709
+ 0.08974358974358974,
710
+ 0.08974358974358974,
711
+ 0.13414634146341464,
712
+ 0.13414634146341464,
713
+ 0.23076923076923078,
714
+ 0.23076923076923078,
715
+ 0.42857142857142855,
716
+ 0.42857142857142855,
717
+ 0.7777777777777778,
718
+ 0.7777777777777778,
719
+ 1.0
720
+ ]
721
+ },
722
+ {
723
+ "coef": [
724
+ [
725
+ 1.0359640190491
726
+ ]
727
+ ],
728
+ "intercept": [
729
+ -1.0883499518982132
730
+ ],
731
+ "x0": [
732
+ -1.0552559216978283,
733
+ -1.0333495853697827,
734
+ -1.0333285407884145,
735
+ -1.0011969268015661,
736
+ -1.0011933068093923,
737
+ -0.9916828772639642,
738
+ -0.9911346684278486,
739
+ -0.9698102979418284,
740
+ -0.9697949843715242,
741
+ -0.9302941777878011,
742
+ -0.930229033365741,
743
+ -0.6288182656438468,
744
+ -0.6237295600569457,
745
+ -0.5994702403937844,
746
+ -0.5973729296189209,
747
+ -0.5172434380370258,
748
+ -0.5152343346620796,
749
+ -0.4833593233187604,
750
+ -0.4610294208915229,
751
+ -0.23213841177916572,
752
+ -0.21753550228399432,
753
+ -0.1589547357990222,
754
+ -0.13718025903664743,
755
+ -0.136125351637146
756
+ ],
757
+ "x_max": -0.136125351637146,
758
+ "x_min": -1.0552559216978283,
759
+ "y0": [
760
+ 0.0,
761
+ 0.0,
762
+ 0.0049261083743842365,
763
+ 0.0049261083743842365,
764
+ 0.012345679012345678,
765
+ 0.012345679012345678,
766
+ 0.014925373134328358,
767
+ 0.014925373134328358,
768
+ 0.04516129032258064,
769
+ 0.04516129032258064,
770
+ 0.05263157894736842,
771
+ 0.05263157894736842,
772
+ 0.1,
773
+ 0.1,
774
+ 0.16,
775
+ 0.16,
776
+ 0.2222222222222222,
777
+ 0.2222222222222222,
778
+ 0.3181818181818182,
779
+ 0.3181818181818182,
780
+ 0.6666666666666666,
781
+ 0.6666666666666666,
782
+ 1.0,
783
+ 1.0
784
+ ]
785
+ },
786
+ {
787
+ "coef": [
788
+ [
789
+ 0.9429092225039666
790
+ ]
791
+ ],
792
+ "intercept": [
793
+ -1.0694328972087712
794
+ ],
795
+ "x0": [
796
+ -1.039598461979245,
797
+ -1.027090895221624,
798
+ -1.0270806700122288,
799
+ -1.0023414590831823,
800
+ -1.0022412513285868,
801
+ -0.9797331874122825,
802
+ -0.9796138218795896,
803
+ -0.9353377659698549,
804
+ -0.9347416338032845,
805
+ -0.90973249280169,
806
+ -0.9093901256737834,
807
+ -0.8997258380366621,
808
+ -0.8996811857309828,
809
+ -0.6788754217433769,
810
+ -0.6763435601389729,
811
+ -0.5870183789328516,
812
+ -0.5841871586044692,
813
+ -0.47078030609401256,
814
+ -0.4642417922579798,
815
+ -0.4189702554849567,
816
+ -0.4188905613761421,
817
+ -0.288691238429426,
818
+ -0.2851481664915613,
819
+ -0.24393625211769732,
820
+ -0.24188702320683753,
821
+ -0.2226179094044748
822
+ ],
823
+ "x_max": -0.2226179094044748,
824
+ "x_min": -1.039598461979245,
825
+ "y0": [
826
+ 0.0,
827
+ 0.0,
828
+ 0.0026595744680851063,
829
+ 0.0026595744680851063,
830
+ 0.008695652173913044,
831
+ 0.008695652173913044,
832
+ 0.011583011583011582,
833
+ 0.011583011583011582,
834
+ 0.013888888888888888,
835
+ 0.013888888888888888,
836
+ 0.034482758620689655,
837
+ 0.034482758620689655,
838
+ 0.05641025641025641,
839
+ 0.05641025641025641,
840
+ 0.18181818181818182,
841
+ 0.18181818181818182,
842
+ 0.21428571428571427,
843
+ 0.21428571428571427,
844
+ 0.5,
845
+ 0.5,
846
+ 0.5714285714285714,
847
+ 0.5714285714285714,
848
+ 0.6666666666666666,
849
+ 0.6666666666666666,
850
+ 0.75,
851
+ 0.75
852
+ ]
853
+ },
854
+ {
855
+ "coef": [
856
+ [
857
+ 0.9300079387746696
858
+ ]
859
+ ],
860
+ "intercept": [
861
+ -1.0690194713018735
862
+ ],
863
+ "x0": [
864
+ -1.0432696190394435,
865
+ -1.0296239120498654,
866
+ -1.029574597651545,
867
+ -1.0151942225358568,
868
+ -1.0150830797862347,
869
+ -0.9686807739460517,
870
+ -0.9686333650499847,
871
+ -0.9202678722062687,
872
+ -0.9200592370322587,
873
+ -0.8215279605321205,
874
+ -0.8193877558337873,
875
+ -0.8032501334690247,
876
+ -0.8001076484435512,
877
+ -0.7398883171030026,
878
+ -0.7375422904448954,
879
+ -0.682972724782267,
880
+ -0.6781819179456279,
881
+ -0.6402737868235991,
882
+ -0.6374435270037346,
883
+ -0.6026054025305556,
884
+ -0.6024444257001804,
885
+ -0.4871964897395866,
886
+ -0.47609418826063177,
887
+ -0.350568790778378,
888
+ -0.3490000981741114,
889
+ -0.22372187551508194,
890
+ -0.18683462242218773,
891
+ -0.18329568206240143
892
+ ],
893
+ "x_max": -0.18329568206240143,
894
+ "x_min": -1.0432696190394435,
895
+ "y0": [
896
+ 0.0,
897
+ 0.0,
898
+ 0.0040650406504065045,
899
+ 0.0040650406504065045,
900
+ 0.009708737864077669,
901
+ 0.009708737864077669,
902
+ 0.015151515151515152,
903
+ 0.015151515151515152,
904
+ 0.017142857142857144,
905
+ 0.017142857142857144,
906
+ 0.05555555555555555,
907
+ 0.05555555555555555,
908
+ 0.09523809523809523,
909
+ 0.09523809523809523,
910
+ 0.13043478260869565,
911
+ 0.13043478260869565,
912
+ 0.2727272727272727,
913
+ 0.2727272727272727,
914
+ 0.3,
915
+ 0.3,
916
+ 0.30434782608695654,
917
+ 0.30434782608695654,
918
+ 0.5,
919
+ 0.5,
920
+ 0.625,
921
+ 0.625,
922
+ 1.0,
923
+ 1.0
924
+ ]
925
+ }
926
+ ],
927
+ "Year4": [
928
+ {
929
+ "coef": [
930
+ [
931
+ 1.0713611402736163
932
+ ]
933
+ ],
934
+ "intercept": [
935
+ -1.0720420740830974
936
+ ],
937
+ "x0": [
938
+ -1.0414022172560469,
939
+ -1.0303919014036294,
940
+ -1.030179489406734,
941
+ -1.0108855945889734,
942
+ -1.0107525502283543,
943
+ -0.989044120775359,
944
+ -0.9888611513537834,
945
+ -0.9822367914190364,
946
+ -0.9822237883133847,
947
+ -0.959024555591222,
948
+ -0.9588443480324798,
949
+ -0.9492286431991185,
950
+ -0.9491016054910486,
951
+ -0.868262399207094,
952
+ -0.8682529003146905,
953
+ -0.8274793436967418,
954
+ -0.826701839397852,
955
+ -0.6882680786258673,
956
+ -0.6859874823646543,
957
+ -0.6041189393814397,
958
+ -0.6021109214208971,
959
+ -0.5253273989559846,
960
+ -0.5249836508026502,
961
+ -0.20071579408044027,
962
+ -0.1932445879322574,
963
+ -0.12811136919545385,
964
+ -0.09567503035144409,
965
+ -0.08763727513835629
966
+ ],
967
+ "x_max": -0.08763727513835629,
968
+ "x_min": -1.0414022172560469,
969
+ "y0": [
970
+ 0.0,
971
+ 0.0,
972
+ 0.008130081300813007,
973
+ 0.008130081300813007,
974
+ 0.01486988847583643,
975
+ 0.01486988847583643,
976
+ 0.015873015873015872,
977
+ 0.015873015873015872,
978
+ 0.018292682926829267,
979
+ 0.018292682926829267,
980
+ 0.028169014084507043,
981
+ 0.028169014084507043,
982
+ 0.04329004329004329,
983
+ 0.04329004329004329,
984
+ 0.06896551724137931,
985
+ 0.06896551724137931,
986
+ 0.07964601769911504,
987
+ 0.07964601769911504,
988
+ 0.11764705882352941,
989
+ 0.11764705882352941,
990
+ 0.21739130434782608,
991
+ 0.21739130434782608,
992
+ 0.25,
993
+ 0.25,
994
+ 0.4,
995
+ 0.4,
996
+ 1.0,
997
+ 1.0
998
+ ]
999
+ },
1000
+ {
1001
+ "coef": [
1002
+ [
1003
+ 1.0236741286444109
1004
+ ]
1005
+ ],
1006
+ "intercept": [
1007
+ -1.0625940402455896
1008
+ ],
1009
+ "x0": [
1010
+ -1.0336085106570108,
1011
+ -1.0144313554536946,
1012
+ -1.014352061695433,
1013
+ -1.0024692401049637,
1014
+ -1.0023982559268134,
1015
+ -0.9959623202441721,
1016
+ -0.9959512916504678,
1017
+ -0.9667386748515856,
1018
+ -0.9665320450724462,
1019
+ -0.8652485370410349,
1020
+ -0.8652359983079437,
1021
+ -0.8220499321037427,
1022
+ -0.821952230661591,
1023
+ -0.7693876649771454,
1024
+ -0.7693015717781595,
1025
+ -0.7142669671983582,
1026
+ -0.7138299115042861,
1027
+ -0.7017531895419336,
1028
+ -0.700041515189594,
1029
+ -0.45283630424364407,
1030
+ -0.4456958160808363,
1031
+ -0.38153801710077406,
1032
+ -0.3722261620546843,
1033
+ -0.3109930912108302,
1034
+ -0.2745786578107059,
1035
+ -0.1432313101190359,
1036
+ -0.1216659095169288,
1037
+ -0.1158644726262289
1038
+ ],
1039
+ "x_max": -0.1158644726262289,
1040
+ "x_min": -1.0336085106570108,
1041
+ "y0": [
1042
+ 0.0,
1043
+ 0.0,
1044
+ 0.00975609756097561,
1045
+ 0.00975609756097561,
1046
+ 0.010752688172043012,
1047
+ 0.010752688172043012,
1048
+ 0.019543973941368076,
1049
+ 0.019543973941368076,
1050
+ 0.02586206896551724,
1051
+ 0.02586206896551724,
1052
+ 0.046875,
1053
+ 0.046875,
1054
+ 0.06976744186046512,
1055
+ 0.06976744186046512,
1056
+ 0.09090909090909091,
1057
+ 0.09090909090909091,
1058
+ 0.14285714285714285,
1059
+ 0.14285714285714285,
1060
+ 0.14864864864864866,
1061
+ 0.14864864864864866,
1062
+ 0.23076923076923078,
1063
+ 0.23076923076923078,
1064
+ 0.42857142857142855,
1065
+ 0.42857142857142855,
1066
+ 0.7777777777777778,
1067
+ 0.7777777777777778,
1068
+ 1.0,
1069
+ 1.0
1070
+ ]
1071
+ },
1072
+ {
1073
+ "coef": [
1074
+ [
1075
+ 1.0396994880608152
1076
+ ]
1077
+ ],
1078
+ "intercept": [
1079
+ -1.0658926889834355
1080
+ ],
1081
+ "x0": [
1082
+ -1.0326793286499445,
1083
+ -1.0106940026668443,
1084
+ -1.0106728822031288,
1085
+ -0.9784254083542221,
1086
+ -0.9784217753091158,
1087
+ -0.9469256058925717,
1088
+ -0.9469102371047443,
1089
+ -0.9370942140091818,
1090
+ -0.9369620532787766,
1091
+ -0.915314170567322,
1092
+ -0.9148419676598699,
1093
+ -0.8716363389381502,
1094
+ -0.870683567049227,
1095
+ -0.6047040278319293,
1096
+ -0.5995969734397801,
1097
+ -0.5752501797570768,
1098
+ -0.5731453065193639,
1099
+ -0.49272688479392623,
1100
+ -0.49071053701351774,
1101
+ -0.4587205910634634,
1102
+ -0.43631017169063924,
1103
+ -0.20659382959771744,
1104
+ -0.19193826507312706,
1105
+ -0.1331462686285585,
1106
+ -0.11129327766907215
1107
+ ],
1108
+ "x_max": -0.11129327766907215,
1109
+ "x_min": -1.0326793286499445,
1110
+ "y0": [
1111
+ 0.0,
1112
+ 0.0,
1113
+ 0.009950248756218905,
1114
+ 0.009950248756218905,
1115
+ 0.014150943396226415,
1116
+ 0.014150943396226415,
1117
+ 0.045454545454545456,
1118
+ 0.045454545454545456,
1119
+ 0.05,
1120
+ 0.05,
1121
+ 0.0673076923076923,
1122
+ 0.0673076923076923,
1123
+ 0.07142857142857142,
1124
+ 0.07142857142857142,
1125
+ 0.1,
1126
+ 0.1,
1127
+ 0.2,
1128
+ 0.2,
1129
+ 0.2222222222222222,
1130
+ 0.2222222222222222,
1131
+ 0.4090909090909091,
1132
+ 0.4090909090909091,
1133
+ 0.6666666666666666,
1134
+ 0.6666666666666666,
1135
+ 1.0
1136
+ ]
1137
+ },
1138
+ {
1139
+ "coef": [
1140
+ [
1141
+ 0.9866801376021137
1142
+ ]
1143
+ ],
1144
+ "intercept": [
1145
+ -1.055717348217441
1146
+ ],
1147
+ "x0": [
1148
+ -1.0244979646808392,
1149
+ -1.0114097824943242,
1150
+ -1.0113990826191164,
1151
+ -0.9855114494715504,
1152
+ -0.9854065899593796,
1153
+ -0.9761218403287525,
1154
+ -0.9760201492440737,
1155
+ -0.9512964316870484,
1156
+ -0.9511816258120083,
1157
+ -0.8886034699831773,
1158
+ -0.8882452097850468,
1159
+ -0.8781322949592707,
1160
+ -0.8780855698429388,
1161
+ -0.7753388033507341,
1162
+ -0.7726365096184081,
1163
+ -0.6674841788771404,
1164
+ -0.6664703408904253,
1165
+ -0.6470297519604127,
1166
+ -0.644380358469925,
1167
+ -0.550908601082995,
1168
+ -0.5479459522971747,
1169
+ -0.429274625122231,
1170
+ -0.422432586061124,
1171
+ -0.2387327764997047,
1172
+ -0.23502523114565355,
1173
+ -0.19190021298402493,
1174
+ -0.18975585654388327,
1175
+ -0.16959224863314815
1176
+ ],
1177
+ "x_max": -0.16959224863314815,
1178
+ "x_min": -1.0244979646808392,
1179
+ "y0": [
1180
+ 0.0,
1181
+ 0.0,
1182
+ 0.008152173913043478,
1183
+ 0.008152173913043478,
1184
+ 0.010416666666666666,
1185
+ 0.010416666666666666,
1186
+ 0.015151515151515152,
1187
+ 0.015151515151515152,
1188
+ 0.019230769230769232,
1189
+ 0.019230769230769232,
1190
+ 0.034482758620689655,
1191
+ 0.034482758620689655,
1192
+ 0.056910569105691054,
1193
+ 0.056910569105691054,
1194
+ 0.07936507936507936,
1195
+ 0.07936507936507936,
1196
+ 0.2,
1197
+ 0.2,
1198
+ 0.20588235294117646,
1199
+ 0.20588235294117646,
1200
+ 0.26666666666666666,
1201
+ 0.26666666666666666,
1202
+ 0.5,
1203
+ 0.5,
1204
+ 0.6666666666666666,
1205
+ 0.6666666666666666,
1206
+ 0.75,
1207
+ 0.75
1208
+ ]
1209
+ },
1210
+ {
1211
+ "coef": [
1212
+ [
1213
+ 0.9720870020574502
1214
+ ]
1215
+ ],
1216
+ "intercept": [
1217
+ -1.0561492900363496
1218
+ ],
1219
+ "x0": [
1220
+ -1.0292343620603719,
1221
+ -1.0149712425262198,
1222
+ -1.0149196968526584,
1223
+ -0.9998886684583321,
1224
+ -0.9997724969529331,
1225
+ -0.9634315079071001,
1226
+ -0.9634086791923234,
1227
+ -0.951270676170449,
1228
+ -0.9512211222153406,
1229
+ -0.8147576198447524,
1230
+ -0.8126328401139585,
1231
+ -0.7783549744382297,
1232
+ -0.7750703047918094,
1233
+ -0.7121262945010313,
1234
+ -0.7096741197122537,
1235
+ -0.6526355039360778,
1236
+ -0.6476279326443739,
1237
+ -0.4480011933435484,
1238
+ -0.43639655803813926,
1239
+ -0.3051916486678352,
1240
+ -0.3035519791260457,
1241
+ -0.17260542823084102,
1242
+ -0.13404917749555145,
1243
+ -0.13035011452912415
1244
+ ],
1245
+ "x_max": -0.13035011452912415,
1246
+ "x_min": -1.0292343620603719,
1247
+ "y0": [
1248
+ 0.0,
1249
+ 0.0,
1250
+ 0.00819672131147541,
1251
+ 0.00819672131147541,
1252
+ 0.011467889908256881,
1253
+ 0.011467889908256881,
1254
+ 0.012987012987012988,
1255
+ 0.012987012987012988,
1256
+ 0.036211699164345405,
1257
+ 0.036211699164345405,
1258
+ 0.07142857142857142,
1259
+ 0.07142857142857142,
1260
+ 0.075,
1261
+ 0.075,
1262
+ 0.13043478260869565,
1263
+ 0.13043478260869565,
1264
+ 0.3170731707317073,
1265
+ 0.3170731707317073,
1266
+ 0.5,
1267
+ 0.5,
1268
+ 0.7142857142857143,
1269
+ 0.7142857142857143,
1270
+ 1.0,
1271
+ 1.0
1272
+ ]
1273
+ }
1274
+ ],
1275
+ "Year5": [
1276
+ {
1277
+ "coef": [
1278
+ [
1279
+ 1.1130009773475025
1280
+ ]
1281
+ ],
1282
+ "intercept": [
1283
+ -1.05794336672334
1284
+ ],
1285
+ "x0": [
1286
+ -1.0261126522122785,
1287
+ -1.0146744061867454,
1288
+ -1.0144537385227264,
1289
+ -0.9969480439120526,
1290
+ -0.9968250495003456,
1291
+ -0.9737491227404367,
1292
+ -0.9736590578600733,
1293
+ -0.9412351121614915,
1294
+ -0.940990193071027,
1295
+ -0.9329694846791076,
1296
+ -0.9328188264757706,
1297
+ -0.8462435476743763,
1298
+ -0.8462336630102207,
1299
+ -0.8038753893527305,
1300
+ -0.8030676331731484,
1301
+ -0.6592534983234417,
1302
+ -0.6568842637374115,
1303
+ -0.6446790254063086,
1304
+ -0.6381987988020102,
1305
+ -0.5718338270620633,
1306
+ -0.5697477317150653,
1307
+ -0.4899798519505131,
1308
+ -0.48962280992041207,
1309
+ -0.15275186333929358,
1310
+ -0.14499027909631035,
1311
+ -0.07732563986998098,
1312
+ -0.04362862074369689,
1313
+ -0.03527840143325056
1314
+ ],
1315
+ "x_max": -0.03527840143325056,
1316
+ "x_min": -1.0261126522122785,
1317
+ "y0": [
1318
+ 0.0,
1319
+ 0.0,
1320
+ 0.009950248756218905,
1321
+ 0.009950248756218905,
1322
+ 0.018867924528301886,
1323
+ 0.018867924528301886,
1324
+ 0.020242914979757085,
1325
+ 0.020242914979757085,
1326
+ 0.05084745762711865,
1327
+ 0.05084745762711865,
1328
+ 0.05150214592274678,
1329
+ 0.05150214592274678,
1330
+ 0.07017543859649122,
1331
+ 0.07017543859649122,
1332
+ 0.09433962264150944,
1333
+ 0.09433962264150944,
1334
+ 0.14285714285714285,
1335
+ 0.14285714285714285,
1336
+ 0.14814814814814814,
1337
+ 0.14814814814814814,
1338
+ 0.21739130434782608,
1339
+ 0.21739130434782608,
1340
+ 0.28,
1341
+ 0.28,
1342
+ 0.4,
1343
+ 0.4,
1344
+ 1.0,
1345
+ 1.0
1346
+ ]
1347
+ },
1348
+ {
1349
+ "coef": [
1350
+ [
1351
+ 1.0638269123140547
1352
+ ]
1353
+ ],
1354
+ "intercept": [
1355
+ -1.0500459135423696
1356
+ ],
1357
+ "x0": [
1358
+ -1.0199234501712975,
1359
+ -0.9999940866794924,
1360
+ -0.9999116787251026,
1361
+ -0.9875627671022001,
1362
+ -0.9874889986274953,
1363
+ -0.9504306995378152,
1364
+ -0.9502159648739382,
1365
+ -0.9230139844783734,
1366
+ -0.922969931058108,
1367
+ -0.8449597096117958,
1368
+ -0.8449466632048586,
1369
+ -0.8000666587391363,
1370
+ -0.7999651250375173,
1371
+ -0.7453387570904597,
1372
+ -0.7452492869558871,
1373
+ -0.6880559631457261,
1374
+ -0.6876018277071652,
1375
+ -0.4163708637296526,
1376
+ -0.4089503591453124,
1377
+ -0.3422760227720679,
1378
+ -0.3325989812075447,
1379
+ -0.26896409304876334,
1380
+ -0.2311212697577245,
1381
+ -0.14109465757978035,
1382
+ -0.1050875889776246,
1383
+ -0.066181523151848
1384
+ ],
1385
+ "x_max": -0.066181523151848,
1386
+ "x_min": -1.0199234501712975,
1387
+ "y0": [
1388
+ 0.0,
1389
+ 0.0,
1390
+ 0.010101010101010102,
1391
+ 0.010101010101010102,
1392
+ 0.02570694087403599,
1393
+ 0.02570694087403599,
1394
+ 0.02857142857142857,
1395
+ 0.02857142857142857,
1396
+ 0.031088082901554404,
1397
+ 0.031088082901554404,
1398
+ 0.08196721311475409,
1399
+ 0.08196721311475409,
1400
+ 0.0975609756097561,
1401
+ 0.0975609756097561,
1402
+ 0.11764705882352941,
1403
+ 0.11764705882352941,
1404
+ 0.14285714285714285,
1405
+ 0.14285714285714285,
1406
+ 0.3076923076923077,
1407
+ 0.3076923076923077,
1408
+ 0.42857142857142855,
1409
+ 0.42857142857142855,
1410
+ 0.8333333333333334,
1411
+ 0.8333333333333334,
1412
+ 1.0,
1413
+ 1.0
1414
+ ]
1415
+ },
1416
+ {
1417
+ "coef": [
1418
+ [
1419
+ 1.096758407454776
1420
+ ]
1421
+ ],
1422
+ "intercept": [
1423
+ -1.055167618027271
1424
+ ],
1425
+ "x0": [
1426
+ -1.020131505808367,
1427
+ -1.0059211960996113,
1428
+ -1.0059050165555292,
1429
+ -0.996939616577453,
1430
+ -0.9969173370183926,
1431
+ -0.9835280206852376,
1432
+ -0.9833181850728733,
1433
+ -0.929671597216839,
1434
+ -0.9296553849868093,
1435
+ -0.9193006483755612,
1436
+ -0.919161242808875,
1437
+ -0.8963253198907385,
1438
+ -0.8958272187346468,
1439
+ -0.8878365014579762,
1440
+ -0.8877675667944027,
1441
+ -0.8502504399770857,
1442
+ -0.8492453634301178,
1443
+ -0.5686688293396501,
1444
+ -0.5632815641355099,
1445
+ -0.5375985154051841,
1446
+ -0.5353781589819728,
1447
+ -0.45054635791402964,
1448
+ -0.4484194179291747,
1449
+ -0.4146738572381863,
1450
+ -0.3910334841196985,
1451
+ -0.2983852309049294,
1452
+ -0.29808164382324565,
1453
+ -0.14871026244525198,
1454
+ -0.13325039755840862,
1455
+ -0.07123188438986094,
1456
+ -0.048179596841319006
1457
+ ],
1458
+ "x_max": -0.048179596841319006,
1459
+ "x_min": -1.020131505808367,
1460
+ "y0": [
1461
+ 0.0,
1462
+ 0.0,
1463
+ 0.007518796992481203,
1464
+ 0.007518796992481203,
1465
+ 0.017543859649122806,
1466
+ 0.017543859649122806,
1467
+ 0.021479713603818614,
1468
+ 0.021479713603818614,
1469
+ 0.04878048780487805,
1470
+ 0.04878048780487805,
1471
+ 0.06329113924050633,
1472
+ 0.06329113924050633,
1473
+ 0.06896551724137931,
1474
+ 0.06896551724137931,
1475
+ 0.07142857142857142,
1476
+ 0.07142857142857142,
1477
+ 0.07453416149068323,
1478
+ 0.07453416149068323,
1479
+ 0.1111111111111111,
1480
+ 0.1111111111111111,
1481
+ 0.2,
1482
+ 0.2,
1483
+ 0.25,
1484
+ 0.25,
1485
+ 0.42857142857142855,
1486
+ 0.42857142857142855,
1487
+ 0.4375,
1488
+ 0.4375,
1489
+ 0.6666666666666666,
1490
+ 0.6666666666666666,
1491
+ 1.0
1492
+ ]
1493
+ },
1494
+ {
1495
+ "coef": [
1496
+ [
1497
+ 1.0192893240958014
1498
+ ]
1499
+ ],
1500
+ "intercept": [
1501
+ -1.0434135518718168
1502
+ ],
1503
+ "x0": [
1504
+ -1.0111623826452667,
1505
+ -0.9959888476152152,
1506
+ -0.9959373241054497,
1507
+ -0.9611874678027185,
1508
+ -0.9610824158887104,
1509
+ -0.9355415867075108,
1510
+ -0.9354229865671895,
1511
+ -0.8979901713623936,
1512
+ -0.8978116598109821,
1513
+ -0.8846290685337032,
1514
+ -0.8843235043891193,
1515
+ -0.8755302346654252,
1516
+ -0.8751063969353022,
1517
+ -0.8599594191627716,
1518
+ -0.8599111498093823,
1519
+ -0.7541104380913812,
1520
+ -0.7509770614187808,
1521
+ -0.6423494788656237,
1522
+ -0.6413021645189869,
1523
+ -0.6212190757900966,
1524
+ -0.6184820606801298,
1525
+ -0.52192117855335,
1526
+ -0.5188606160028857,
1527
+ -0.39626727267313655,
1528
+ -0.3891991083266022,
1529
+ -0.34026030244508254,
1530
+ -0.3401742134913346,
1531
+ -0.19942813028762418,
1532
+ -0.19559799202994355,
1533
+ -0.1510479001582279,
1534
+ -0.14883255251569716,
1535
+ -0.1280025494626167
1536
+ ],
1537
+ "x_max": -0.1280025494626167,
1538
+ "x_min": -1.0111623826452667,
1539
+ "y0": [
1540
+ 0.0,
1541
+ 0.0,
1542
+ 0.0136986301369863,
1543
+ 0.0136986301369863,
1544
+ 0.021739130434782608,
1545
+ 0.021739130434782608,
1546
+ 0.02247191011235955,
1547
+ 0.02247191011235955,
1548
+ 0.02564102564102564,
1549
+ 0.02564102564102564,
1550
+ 0.05,
1551
+ 0.05,
1552
+ 0.05405405405405406,
1553
+ 0.05405405405405406,
1554
+ 0.059322033898305086,
1555
+ 0.059322033898305086,
1556
+ 0.109375,
1557
+ 0.109375,
1558
+ 0.2,
1559
+ 0.2,
1560
+ 0.2727272727272727,
1561
+ 0.2727272727272727,
1562
+ 0.2857142857142857,
1563
+ 0.2857142857142857,
1564
+ 0.5,
1565
+ 0.5,
1566
+ 0.6,
1567
+ 0.6,
1568
+ 0.6666666666666666,
1569
+ 0.6666666666666666,
1570
+ 0.75,
1571
+ 0.75
1572
+ ]
1573
+ },
1574
+ {
1575
+ "coef": [
1576
+ [
1577
+ 1.0246281291281125
1578
+ ]
1579
+ ],
1580
+ "intercept": [
1581
+ -1.046126360981268
1582
+ ],
1583
+ "x0": [
1584
+ -1.017756686004363,
1585
+ -1.0076691078074915,
1586
+ -1.007111022611281,
1587
+ -0.9865689458798288,
1588
+ -0.9864557783600643,
1589
+ -0.9355790769588279,
1590
+ -0.9355268446213867,
1591
+ -0.8592524325149089,
1592
+ -0.8591646101223447,
1593
+ -0.8017786006234253,
1594
+ -0.8017372850124589,
1595
+ -0.7916875145693405,
1596
+ -0.7894478908774645,
1597
+ -0.6835089846082325,
1598
+ -0.6809242702145596,
1599
+ -0.6208027275469761,
1600
+ -0.6155244368578598,
1601
+ -0.5322587317287224,
1602
+ -0.5320813769109154,
1603
+ -0.40510803046102895,
1604
+ -0.392876105638554,
1605
+ -0.25457965566226204,
1606
+ -0.2528513622775791,
1607
+ -0.11482711286015956,
1608
+ -0.07418678158401704,
1609
+ -0.07028790707685573
1610
+ ],
1611
+ "x_max": -0.07028790707685573,
1612
+ "x_min": -1.017756686004363,
1613
+ "y0": [
1614
+ 0.0,
1615
+ 0.0,
1616
+ 0.010638297872340425,
1617
+ 0.010638297872340425,
1618
+ 0.016393442622950817,
1619
+ 0.016393442622950817,
1620
+ 0.04918032786885246,
1621
+ 0.04918032786885246,
1622
+ 0.05555555555555555,
1623
+ 0.05555555555555555,
1624
+ 0.07142857142857142,
1625
+ 0.07142857142857142,
1626
+ 0.08333333333333333,
1627
+ 0.08333333333333333,
1628
+ 0.13043478260869565,
1629
+ 0.13043478260869565,
1630
+ 0.3157894736842105,
1631
+ 0.3157894736842105,
1632
+ 0.3333333333333333,
1633
+ 0.3333333333333333,
1634
+ 0.5,
1635
+ 0.5,
1636
+ 0.7142857142857143,
1637
+ 0.7142857142857143,
1638
+ 1.0,
1639
+ 1.0
1640
+ ]
1641
+ }
1642
+ ],
1643
+ "Year6": [
1644
+ {
1645
+ "coef": [
1646
+ [
1647
+ 1.340606917145108
1648
+ ]
1649
+ ],
1650
+ "intercept": [
1651
+ -1.0475421908007814
1652
+ ],
1653
+ "x0": [
1654
+ -1.0092021741074475,
1655
+ -0.995633400824669,
1656
+ -0.9951590414806812,
1657
+ -0.9820283475396332,
1658
+ -0.9817901515581261,
1659
+ -0.906122027838656,
1660
+ -0.9058965319804526,
1661
+ -0.8970114597943656,
1662
+ -0.8968299923620552,
1663
+ -0.7925502837390519,
1664
+ -0.792538377685589,
1665
+ -0.7439244631515856,
1666
+ -0.7405450017919006,
1667
+ -0.5673212330021985,
1668
+ -0.5644674958186597,
1669
+ -0.46202445575510687,
1670
+ -0.4595117590828409,
1671
+ -0.35183289217277947,
1672
+ -0.35087936911255946,
1673
+ 0.04275873029063826,
1674
+ 0.05210753926378531,
1675
+ 0.1336094294140786,
1676
+ 0.17419740538830197,
1677
+ 0.18425522384048465
1678
+ ],
1679
+ "x_max": 0.18425522384048465,
1680
+ "x_min": -1.0092021741074475,
1681
+ "y0": [
1682
+ 0.0,
1683
+ 0.0,
1684
+ 0.022988505747126436,
1685
+ 0.022988505747126436,
1686
+ 0.03211009174311927,
1687
+ 0.03211009174311927,
1688
+ 0.046511627906976744,
1689
+ 0.046511627906976744,
1690
+ 0.06936416184971098,
1691
+ 0.06936416184971098,
1692
+ 0.10810810810810811,
1693
+ 0.10810810810810811,
1694
+ 0.13414634146341464,
1695
+ 0.13414634146341464,
1696
+ 0.18518518518518517,
1697
+ 0.18518518518518517,
1698
+ 0.3333333333333333,
1699
+ 0.3333333333333333,
1700
+ 0.35294117647058826,
1701
+ 0.35294117647058826,
1702
+ 0.5,
1703
+ 0.5,
1704
+ 1.0,
1705
+ 1.0
1706
+ ]
1707
+ },
1708
+ {
1709
+ "coef": [
1710
+ [
1711
+ 1.2907222572139896
1712
+ ]
1713
+ ],
1714
+ "intercept": [
1715
+ -1.039821376539258
1716
+ ],
1717
+ "x0": [
1718
+ -1.0032743278933614,
1719
+ -0.9791775889843037,
1720
+ -0.9789944017955169,
1721
+ -0.9643426097971722,
1722
+ -0.963922185149528,
1723
+ -0.9613656665174612,
1724
+ -0.961356819217672,
1725
+ -0.9205582848036054,
1726
+ -0.9202258763639181,
1727
+ -0.7909939357247382,
1728
+ -0.7909781067514198,
1729
+ -0.7780041955414604,
1730
+ -0.7776605356444324,
1731
+ -0.7365259961709816,
1732
+ -0.7364028071380476,
1733
+ -0.7206493820664985,
1734
+ -0.7201010418121743,
1735
+ -0.6034922842627248,
1736
+ -0.6000744184880863,
1737
+ -0.27099471764664695,
1738
+ -0.26199155151510733,
1739
+ -0.1810967659576298,
1740
+ -0.16935578367307202,
1741
+ -0.09214870653380258,
1742
+ -0.04623468261490005,
1743
+ 0.06299300351957249,
1744
+ 0.10667973894981264,
1745
+ 0.15388377611101745
1746
+ ],
1747
+ "x_max": 0.15388377611101745,
1748
+ "x_min": -1.0032743278933614,
1749
+ "y0": [
1750
+ 0.0,
1751
+ 0.0,
1752
+ 0.014388489208633094,
1753
+ 0.014388489208633094,
1754
+ 0.04,
1755
+ 0.04,
1756
+ 0.0421455938697318,
1757
+ 0.0421455938697318,
1758
+ 0.05138339920948617,
1759
+ 0.05138339920948617,
1760
+ 0.07692307692307693,
1761
+ 0.07692307692307693,
1762
+ 0.08333333333333333,
1763
+ 0.08333333333333333,
1764
+ 0.14285714285714285,
1765
+ 0.14285714285714285,
1766
+ 0.15217391304347827,
1767
+ 0.15217391304347827,
1768
+ 0.20689655172413793,
1769
+ 0.20689655172413793,
1770
+ 0.36363636363636365,
1771
+ 0.36363636363636365,
1772
+ 0.6,
1773
+ 0.6,
1774
+ 0.8333333333333334,
1775
+ 0.8333333333333334,
1776
+ 1.0,
1777
+ 1.0
1778
+ ]
1779
+ },
1780
+ {
1781
+ "coef": [
1782
+ [
1783
+ 1.2981443067116996
1784
+ ]
1785
+ ],
1786
+ "intercept": [
1787
+ -1.0424440037259006
1788
+ ],
1789
+ "x0": [
1790
+ -1.0009745886777577,
1791
+ -0.9841549932959078,
1792
+ -0.9841358428769162,
1793
+ -0.9735242233444598,
1794
+ -0.9734978528306313,
1795
+ -0.9576500009696335,
1796
+ -0.9574016355103996,
1797
+ -0.893904504212875,
1798
+ -0.8938853151061683,
1799
+ -0.8816292500621034,
1800
+ -0.8814642469570553,
1801
+ -0.8544352101854283,
1802
+ -0.8538456480944956,
1803
+ -0.8443876817194529,
1804
+ -0.8443060893282346,
1805
+ -0.79990009777071,
1806
+ -0.7987104698741856,
1807
+ -0.7891257045127901,
1808
+ -0.7888712360664938,
1809
+ -0.42983927753286244,
1810
+ -0.4272112210437845,
1811
+ -0.3268026726926374,
1812
+ -0.32428518569341347,
1813
+ -0.284343292209814,
1814
+ -0.2563620927371575,
1815
+ -0.14670184156420663,
1816
+ -0.1463425100660386,
1817
+ 0.030456324575673266,
1818
+ 0.048754917455359426,
1819
+ 0.11314767903017375,
1820
+ 0.1494463503795691
1821
+ ],
1822
+ "x_max": 0.1494463503795691,
1823
+ "x_min": -1.0009745886777577,
1824
+ "y0": [
1825
+ 0.0,
1826
+ 0.0,
1827
+ 0.00980392156862745,
1828
+ 0.00980392156862745,
1829
+ 0.022556390977443608,
1830
+ 0.022556390977443608,
1831
+ 0.03289473684210526,
1832
+ 0.03289473684210526,
1833
+ 0.0625,
1834
+ 0.0625,
1835
+ 0.08771929824561403,
1836
+ 0.08771929824561403,
1837
+ 0.1,
1838
+ 0.1,
1839
+ 0.10526315789473684,
1840
+ 0.10526315789473684,
1841
+ 0.1111111111111111,
1842
+ 0.1111111111111111,
1843
+ 0.14545454545454545,
1844
+ 0.14545454545454545,
1845
+ 0.2631578947368421,
1846
+ 0.2631578947368421,
1847
+ 0.2857142857142857,
1848
+ 0.2857142857142857,
1849
+ 0.42857142857142855,
1850
+ 0.42857142857142855,
1851
+ 0.6363636363636364,
1852
+ 0.6363636363636364,
1853
+ 0.8,
1854
+ 0.8,
1855
+ 1.0
1856
+ ]
1857
+ },
1858
+ {
1859
+ "coef": [
1860
+ [
1861
+ 1.267499953639535
1862
+ ]
1863
+ ],
1864
+ "intercept": [
1865
+ -1.035861815812616
1866
+ ],
1867
+ "x0": [
1868
+ -0.9957570540603671,
1869
+ -0.9821540666109716,
1870
+ -0.982102731143961,
1871
+ -0.9638658914809329,
1872
+ -0.9638377872959027,
1873
+ -0.9336125767773732,
1874
+ -0.9334819433124802,
1875
+ -0.8550258905297626,
1876
+ -0.8548039090199113,
1877
+ -0.8384111798528299,
1878
+ -0.8380312067382899,
1879
+ -0.8279540810062146,
1880
+ -0.826569609923502,
1881
+ -0.8077341397870003,
1882
+ -0.8076741161982733,
1883
+ -0.6761095113040186,
1884
+ -0.6722131153609145,
1885
+ -0.5384220470663117,
1886
+ -0.5358309092005328,
1887
+ -0.5108573188459119,
1888
+ -0.5074538038252172,
1889
+ -0.33549109502565666,
1890
+ -0.3195973473534073,
1891
+ -0.2311267327585821,
1892
+ -0.22233737553707267,
1893
+ -0.16148131365196827,
1894
+ -0.16137426088264661,
1895
+ 0.0738068478711209,
1896
+ 0.07656166239514639,
1897
+ 0.10246405073568132
1898
+ ],
1899
+ "x_max": 0.10246405073568132,
1900
+ "x_min": -0.9957570540603671,
1901
+ "y0": [
1902
+ 0.0,
1903
+ 0.0,
1904
+ 0.024691358024691357,
1905
+ 0.024691358024691357,
1906
+ 0.0297029702970297,
1907
+ 0.0297029702970297,
1908
+ 0.03543307086614173,
1909
+ 0.03543307086614173,
1910
+ 0.04,
1911
+ 0.04,
1912
+ 0.06666666666666667,
1913
+ 0.06666666666666667,
1914
+ 0.08,
1915
+ 0.08,
1916
+ 0.08791208791208792,
1917
+ 0.08791208791208792,
1918
+ 0.13725490196078433,
1919
+ 0.13725490196078433,
1920
+ 0.2857142857142857,
1921
+ 0.2857142857142857,
1922
+ 0.2972972972972973,
1923
+ 0.2972972972972973,
1924
+ 0.3333333333333333,
1925
+ 0.3333333333333333,
1926
+ 0.5714285714285714,
1927
+ 0.5714285714285714,
1928
+ 0.7272727272727273,
1929
+ 0.7272727272727273,
1930
+ 0.75,
1931
+ 0.75
1932
+ ]
1933
+ },
1934
+ {
1935
+ "coef": [
1936
+ [
1937
+ 1.262442132690388
1938
+ ]
1939
+ ],
1940
+ "intercept": [
1941
+ -1.0375704296176012
1942
+ ],
1943
+ "x0": [
1944
+ -1.0026162135821177,
1945
+ -0.9901873299993725,
1946
+ -0.9894997144193408,
1947
+ -0.9646615162846798,
1948
+ -0.9640504320285327,
1949
+ -0.9013653567360134,
1950
+ -0.9013010013844471,
1951
+ -0.8107793489199652,
1952
+ -0.8072152551147188,
1953
+ -0.7464076686646566,
1954
+ -0.7457327181651399,
1955
+ -0.5907903335367819,
1956
+ -0.5876057124447174,
1957
+ -0.5135300879442599,
1958
+ -0.5070267172431278,
1959
+ -0.40443521953298234,
1960
+ -0.40421670104020413,
1961
+ -0.2537263109006871,
1962
+ -0.232702183186429,
1963
+ -0.06230742211331197,
1964
+ -0.0601779955199484,
1965
+ 0.10988138782509971,
1966
+ 0.15995425331743718,
1967
+ 0.16475804828264184
1968
+ ],
1969
+ "x_max": 0.16475804828264184,
1970
+ "x_min": -1.0026162135821177,
1971
+ "y0": [
1972
+ 0.0,
1973
+ 0.0,
1974
+ 0.014563106796116505,
1975
+ 0.014563106796116505,
1976
+ 0.027932960893854747,
1977
+ 0.027932960893854747,
1978
+ 0.0755813953488372,
1979
+ 0.0755813953488372,
1980
+ 0.1016949152542373,
1981
+ 0.1016949152542373,
1982
+ 0.109375,
1983
+ 0.109375,
1984
+ 0.15,
1985
+ 0.15,
1986
+ 0.4,
1987
+ 0.4,
1988
+ 0.4375,
1989
+ 0.4375,
1990
+ 0.5,
1991
+ 0.5,
1992
+ 0.7142857142857143,
1993
+ 0.7142857142857143,
1994
+ 1.0,
1995
+ 1.0
1996
+ ]
1997
+ }
1998
+ ]
1999
+ },
2000
+ "censoring_distribution": "weibull",
2001
+ "dropout": 0.0,
2002
+ "ensemble_size": 5,
2003
+ "hidden_dim": 512,
2004
+ "img_size": [
2005
+ 512,
2006
+ 512
2007
+ ],
2008
+ "initializer_range": 0.02,
2009
+ "max_followup": 6,
2010
+ "model_type": "sybil",
2011
+ "num_images": 208,
2012
+ "transformers_version": "4.53.2",
2013
+ "voxel_spacing": [
2014
+ 0.703125,
2015
+ 0.703125,
2016
+ 2.5
2017
+ ]
2018
+ }
configuration_sybil.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Sybil model configuration"""
2
+
3
+ from transformers import PretrainedConfig
4
+ from typing import Optional, List, Dict
5
+ import json
6
+
7
+
8
class SybilConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`SybilForRiskPrediction`].
    It is used to instantiate a Sybil model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.

    Args:
        hidden_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the hidden representations.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers.
        max_followup (`int`, *optional*, defaults to 6):
            Maximum number of years for risk prediction.
        num_images (`int`, *optional*, defaults to 208):
            Number of CT scan slices to process.
        img_size (`List[int]`, *optional*, defaults to `[512, 512]`):
            Size of input images after preprocessing.
        voxel_spacing (`List[float]`, *optional*, defaults to `[0.703125, 0.703125, 2.5]`):
            Target voxel spacing for CT scans (row, column, slice thickness).
        censoring_distribution (`str`, *optional*, defaults to "weibull"):
            Distribution used for censoring in survival analysis.
        ensemble_size (`int`, *optional*, defaults to 5):
            Number of models in the ensemble.
        calibrator_data (`Dict`, *optional*, defaults to `None`):
            Calibration data for risk score adjustment.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation used when initializing weight matrices.
    """

    model_type = "sybil"

    def __init__(
        self,
        hidden_dim: int = 512,
        dropout: float = 0.0,
        max_followup: int = 6,
        num_images: int = 208,
        # Annotated Optional because the sentinel default is None; the real
        # defaults are materialized below to avoid a shared mutable default.
        img_size: Optional[List[int]] = None,
        voxel_spacing: Optional[List[float]] = None,
        censoring_distribution: str = "weibull",
        ensemble_size: int = 5,
        calibrator_data: Optional[Dict] = None,
        initializer_range: float = 0.02,
        **kwargs
    ):
        super().__init__(**kwargs)

        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.max_followup = max_followup
        self.num_images = num_images
        self.img_size = img_size if img_size is not None else [512, 512]
        self.voxel_spacing = voxel_spacing if voxel_spacing is not None else [0.703125, 0.703125, 2.5]
        self.censoring_distribution = censoring_distribution
        self.ensemble_size = ensemble_size
        self.calibrator_data = calibrator_data
        self.initializer_range = initializer_range
+ self.initializer_range = initializer_range
image_processing_sybil.py ADDED
@@ -0,0 +1,451 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Image processor for Sybil CT scan preprocessing"""
2
+
3
+ import cv2
4
+ import numpy as np
5
+ import torch
6
+ from typing import Dict, List, Optional, Union, Tuple
7
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
8
+ from transformers.utils import TensorType
9
+ import pydicom
10
+ from PIL import Image
11
+ import torchio as tio
12
+
13
+
14
def order_slices(dicoms: List) -> List:
    """Order DICOM slices along the scan axis.

    Sorts by the z-component of ``ImagePositionPatient`` when usable,
    falling back to ``InstanceNumber``, and finally to the input order.

    Args:
        dicoms: List of pydicom datasets.

    Returns:
        A list with the slices in anatomical order (the input list is
        not modified).
    """
    # Primary key: physical slice position along the patient z-axis.
    # Also catch IndexError (truncated position tag) and ValueError
    # (non-numeric tag value), which the original code let escape.
    try:
        return sorted(dicoms, key=lambda d: float(d.ImagePositionPatient[2]))
    except (AttributeError, TypeError, IndexError, ValueError):
        pass
    # Secondary key: acquisition instance number.
    try:
        return sorted(dicoms, key=lambda d: int(d.InstanceNumber))
    except (AttributeError, TypeError, ValueError):
        # Neither tag is usable; keep the original order.
        return dicoms
26
+
27
+
28
+ class SybilImageProcessor(BaseImageProcessor):
29
+ """
30
+ Constructs a Sybil image processor for preprocessing CT scans.
31
+
32
+ Args:
33
+ voxel_spacing (`List[float]`, *optional*, defaults to `[0.703125, 0.703125, 2.5]`):
34
+ Target voxel spacing for resampling (row, column, slice thickness).
35
+ img_size (`List[int]`, *optional*, defaults to `[512, 512]`):
36
+ Target image size after resizing.
37
+ num_images (`int`, *optional*, defaults to `208`):
38
+ Number of slices to use from the CT scan.
39
+ windowing (`Dict[str, float]`, *optional*):
40
+ Windowing parameters for CT scan visualization.
41
+ Default uses lung window: center=-600, width=1500.
42
+ normalize (`bool`, *optional*, defaults to `True`):
43
+ Whether to normalize pixel values to [0, 1].
44
+ **kwargs:
45
+ Additional keyword arguments passed to the parent class.
46
+ """
47
+
48
+ model_input_names = ["pixel_values"]
49
+
50
def __init__(
    self,
    voxel_spacing: List[float] = None,
    img_size: List[int] = None,
    num_images: int = 208,
    windowing: Dict[str, float] = None,
    normalize: bool = True,
    **kwargs
):
    """Store preprocessing parameters and build the TorchIO transforms."""
    super().__init__(**kwargs)

    # Materialize mutable defaults (kept as None in the signature).
    if voxel_spacing is None:
        voxel_spacing = [0.703125, 0.703125, 2.5]
    if img_size is None:
        img_size = [512, 512]
    if windowing is None:
        # Lung window: center/width in Hounsfield Units.
        windowing = {"center": -600, "width": 1500}

    self.voxel_spacing = voxel_spacing
    self.img_size = img_size
    self.num_images = num_images
    self.windowing = windowing
    self.normalize = normalize

    # TorchIO transforms used to standardize every series.
    self.resample_transform = tio.transforms.Resample(target=self.voxel_spacing)
    # The original Sybil operates on 200 slices of 256x256 pixels.
    self.default_depth = 200
    self.default_size = [256, 256]
    # CropOrPad's target_shape is (H, W, D), matching original Sybil.
    self.padding_transform = tio.transforms.CropOrPad(
        target_shape=tuple(self.default_size + [self.default_depth]),  # (256, 256, 200)
        padding_mode=0,
    )
82
+
83
def load_dicom_series(self, paths: List[str]) -> Tuple[np.ndarray, Dict]:
    """
    Load and assemble a DICOM series into a 3D volume.

    Unreadable files are skipped with a console message; at least one
    readable file is required.

    Args:
        paths: List of paths to DICOM files.

    Returns:
        Tuple of (volume array, metadata dict). The volume is converted
        to Hounsfield Units when rescale tags are present.

    Raises:
        ValueError: If none of the paths could be read.
    """
    dicoms = []
    for path in paths:
        try:
            dicoms.append(pydicom.dcmread(path, stop_before_pixels=False))
        except Exception as e:
            # Best-effort: report and skip unreadable files.
            print(f"Error reading DICOM file {path}: {e}")

    if not dicoms:
        raise ValueError("No valid DICOM files found")

    # Sort into anatomical order before stacking into a (D, H, W) volume.
    dicoms = order_slices(dicoms)
    volume = np.stack([d.pixel_array.astype(np.float32) for d in dicoms])

    # Metadata is read from the first slice; missing tags become None.
    first = dicoms[0]
    metadata = {
        "slice_thickness": float(first.SliceThickness) if hasattr(first, 'SliceThickness') else None,
        "pixel_spacing": list(map(float, first.PixelSpacing)) if hasattr(first, 'PixelSpacing') else None,
        "manufacturer": str(first.Manufacturer) if hasattr(first, 'Manufacturer') else None,
        "num_slices": len(dicoms),
    }

    # Convert stored values to Hounsfield Units when rescale tags exist.
    if hasattr(first, 'RescaleSlope') and hasattr(first, 'RescaleIntercept'):
        volume = volume * float(first.RescaleSlope) + float(first.RescaleIntercept)

    return volume, metadata
126
+
127
def load_png_series(self, paths: List[str]) -> np.ndarray:
    """
    Load a series of PNG slices into a 3D volume.

    Args:
        paths: PNG file paths, assumed to already be in anatomical order.

    Returns:
        float32 volume of shape (num_slices, H, W).
    """
    # Grayscale conversion matches the single-channel CT representation.
    slices = [
        np.array(Image.open(p).convert('L'), dtype=np.float32) for p in paths
    ]
    return np.stack(slices)
143
+
144
def resize_slices(self, volume: np.ndarray, target_size: List[int] = None) -> np.ndarray:
    """
    Resize every slice with OpenCV bilinear interpolation.

    This exactly matches the original Sybil's per-slice 2D resize.

    Args:
        volume: 3D volume of shape (D, H, W).
        target_size: Target [H, W]; defaults to ``self.default_size``.

    Returns:
        Resized volume of shape (D, target_H, target_W).
    """
    if target_size is None:
        target_size = self.default_size  # [256, 256]

    # cv2.resize takes dsize as (width, height) — note the swap.
    dsize = (target_size[1], target_size[0])
    return np.stack(
        [cv2.resize(s, dsize=dsize, interpolation=cv2.INTER_LINEAR) for s in volume],
        axis=0,
    )
173
+
174
def apply_windowing(self, volume: np.ndarray) -> np.ndarray:
    """
    Apply DICOM-standard intensity windowing, matching original Sybil.

    Maps HU values into a 16-bit range via the DICOM formula (using
    center - 0.5 and width - 1), then floor-divides by 256 for 8-bit
    parity, yielding values in [0, 255].

    Args:
        volume: 3D CT volume in Hounsfield Units.

    Returns:
        Windowed volume in [0, 255].
    """
    # DICOM standard adjustments: -600.5 and 1499 for the default window.
    c = self.windowing["center"] - 0.5
    w = self.windowing["width"] - 1

    # 16-bit output range [0, 65535].
    y_min = 0
    y_max = 2 ** 16 - 1
    y_range = y_max - y_min

    lower_bound = c - w / 2  # -1350.0 with the default lung window
    upper_bound = c + w / 2  # 149.0 with the default lung window

    below = volume <= lower_bound
    above = volume > upper_bound
    within = ~below & ~above

    windowed = np.zeros_like(volume, dtype=np.float32)
    # `below` entries stay at y_min (the zeros_like fill value).
    windowed[above] = y_max
    if within.any():
        # Linear ramp across the window.
        windowed[within] = ((volume[within] - c) / w + 0.5) * y_range + y_min

    # Floor-divide by 256 for 8-bit parity (matching original Sybil),
    # giving [0, 255] instead of [0, 65535].
    return windowed // 256
227
+
228
def resample_volume(
    self,
    volume: torch.Tensor,
    original_spacing: Optional[List[float]] = None
) -> torch.Tensor:
    """
    Resample a volume to the processor's target voxel spacing.

    Builds a diagonal affine from the original spacing — exactly as the
    original Sybil does — and delegates the resampling to TorchIO.

    Args:
        volume: Tensor of shape (D, H, W) or (C, D, H, W).
        original_spacing: Source spacing [H_spacing, W_spacing, D_spacing];
            when None, TorchIO's default affine is used.

    Returns:
        Resampled tensor with the same number of dimensions as the input.

    Raises:
        ValueError: If the volume is not 3D or 4D.
    """
    ndim = volume.dim()
    if ndim == 3:
        volume_4d = volume.unsqueeze(0)  # (D, H, W) -> (1, D, H, W)
    elif ndim == 4:
        volume_4d = volume  # already (C, D, H, W)
    else:
        raise ValueError(f"Expected 3D or 4D volume, got shape {volume.shape}")

    # TorchIO expects (C, H, W, D).
    tio_tensor = volume_4d.permute(0, 2, 3, 1)

    # Original Sybil builds the affine as diag(spacing + [1.0]).
    affine = None
    if original_spacing is not None:
        affine = torch.diag(
            torch.tensor(original_spacing + [1.0], dtype=torch.float32)
        )

    subject = tio.Subject(
        image=tio.ScalarImage(tensor=tio_tensor, affine=affine)
    )
    resampled = self.resample_transform(subject)

    # Back to (C, D, H, W), dropping the channel dim for 3D inputs.
    out = resampled['image'].data.permute(0, 3, 1, 2)
    return out.squeeze(0) if ndim == 3 else out
284
+
285
def pad_or_crop_volume(self, volume: torch.Tensor) -> torch.Tensor:
    """
    Pad or crop a volume to the processor's target shape via TorchIO.

    Args:
        volume: Tensor of shape (D, H, W) or (C, D, H, W).

    Returns:
        Padded/cropped tensor with the same number of dimensions.

    Raises:
        ValueError: If the volume is not 3D or 4D.
    """
    ndim = volume.dim()
    if ndim == 3:
        volume_4d = volume.unsqueeze(0)  # (D, H, W) -> (1, D, H, W)
    elif ndim == 4:
        volume_4d = volume  # already (C, D, H, W)
    else:
        raise ValueError(f"Expected 3D or 4D volume, got shape {volume.shape}")

    # TorchIO expects (C, H, W, D).
    subject = tio.Subject(
        image=tio.ScalarImage(tensor=volume_4d.permute(0, 2, 3, 1))
    )
    transformed = self.padding_transform(subject)

    # Back to (C, D, H, W), dropping the channel dim for 3D inputs.
    out = transformed['image'].data.permute(0, 3, 1, 2)
    return out.squeeze(0) if ndim == 3 else out
326
+
327
def preprocess(
    self,
    images: Union[List[str], np.ndarray, torch.Tensor],
    file_type: str = "dicom",
    voxel_spacing: Optional[List[float]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    **kwargs
) -> BatchFeature:
    """
    Preprocess a CT series into model-ready pixel values.

    Pipeline (matching original Sybil): windowing -> per-slice resize to
    256x256 -> normalization -> 3-channel replication -> 3D resampling ->
    crop/pad -> add batch dimension.

    Args:
        images: List of file paths, or a numpy/torch volume (D, H, W).
        file_type: Type of input files ("dicom" or "png").
        voxel_spacing: Original spacing [H, W, D]; required for PNG input,
            read from DICOM metadata otherwise.
        return_tensors: The type of tensors to return ("pt", "np", None).

    Returns:
        BatchFeature with "pixel_values" of shape (1, 3, D, H, W).

    Raises:
        ValueError: For unknown file types, missing PNG spacing, or
            unsupported input containers (including an empty list).
    """
    # Load from disk when given a (non-empty) list of paths. The `images`
    # guard avoids an IndexError on an empty list; it falls through to
    # the explicit ValueError below instead.
    if isinstance(images, list) and images and isinstance(images[0], str):
        if file_type == "dicom":
            volume, metadata = self.load_dicom_series(images)
            # Derive spacing from DICOM tags only when BOTH components
            # exist; a None slice thickness would otherwise end up in
            # the resampling affine and crash downstream.
            if (
                voxel_spacing is None
                and metadata["pixel_spacing"]
                and metadata["slice_thickness"] is not None
            ):
                voxel_spacing = metadata["pixel_spacing"] + [metadata["slice_thickness"]]
        elif file_type == "png":
            if voxel_spacing is None:
                raise ValueError("voxel_spacing must be provided for PNG files")
            volume = self.load_png_series(images)
        else:
            raise ValueError(f"Unknown file type: {file_type}")
    elif isinstance(images, (np.ndarray, torch.Tensor)):
        volume = images
    else:
        raise ValueError("Images must be file paths, numpy array, or torch tensor")

    # Work in numpy for windowing and per-slice resizing.
    volume_np = volume.numpy() if isinstance(volume, torch.Tensor) else volume

    # Intensity windowing to [0, 255].
    volume_np = self.apply_windowing(volume_np)

    # Resize each slice to 256x256 (matching original Sybil).
    volume_np = self.resize_slices(volume_np, target_size=self.default_size)

    # NOTE: Original Sybil keeps the ORIGINAL DICOM voxel spacing even
    # after resizing slices. This is physically incorrect (spacing should
    # scale with the resize factor), but we reproduce that behavior.

    volume = torch.from_numpy(volume_np).float()

    # Normalize BEFORE resampling, like the original Sybil, so that 3D
    # interpolation operates on normalized values rather than [0, 255].
    # Mean/std come from the original implementation, computed on 8-bit
    # windowed images.
    img_mean = 128.1722
    img_std = 87.1849
    volume = (volume - img_mean) / img_std

    # Replicate to 3 channels BEFORE resampling (matching original Sybil):
    # (D, H, W) -> (3, D, H, W).
    volume = volume.unsqueeze(0).repeat(3, 1, 1, 1)

    # 3D resampling to target voxel spacing (on the 3-channel volume).
    if voxel_spacing is not None:
        volume = self.resample_volume(volume, voxel_spacing)

    # Standardize the spatial extent, then add the batch dimension to
    # match the original Sybil output shape (1, 3, D, H, W).
    volume = self.pad_or_crop_volume(volume)
    volume = volume.unsqueeze(0)

    data = {"pixel_values": volume}
    if return_tensors == "pt":
        return BatchFeature(data=data, tensor_type=TensorType.PYTORCH)
    if return_tensors == "np":
        data = {k: v.numpy() for k, v in data.items()}
        return BatchFeature(data=data, tensor_type=TensorType.NUMPY)
    return BatchFeature(data=data)
420
+
421
def __call__(
    self,
    images: Union[List[str], List[List[str]], np.ndarray, torch.Tensor],
    **kwargs
) -> BatchFeature:
    """
    Main method to prepare images for the model.

    Args:
        images: Images to preprocess. Can be:
            - List of file paths for a single series
            - List of lists of file paths for multiple series
            - Numpy array or torch tensor

    Returns:
        BatchFeature with "pixel_values" of shape (B, 3, D, H, W) for a
        batch of series, or (1, 3, D, H, W) for a single series.
    """
    if isinstance(images, list) and images and isinstance(images[0], list):
        # Batch of series. Each preprocess() output already carries a
        # leading batch dim of 1, so concatenate along dim 0 to obtain
        # (B, C, D, H, W); torch.stack would instead produce a spurious
        # (B, 1, C, D, H, W).
        batch_volumes = [
            self.preprocess(series_paths, **kwargs)["pixel_values"]
            for series_paths in images
        ]
        pixel_values = torch.cat(batch_volumes, dim=0)
        return BatchFeature(data={"pixel_values": pixel_values})

    # Single series.
    return self.preprocess(images, **kwargs)
modeling_sybil.py ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """PyTorch Sybil model for lung cancer risk prediction"""
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torchvision
6
+ from transformers import PreTrainedModel
7
+ from transformers.modeling_outputs import BaseModelOutput
8
+ from typing import Optional, Dict, List, Tuple
9
+ import numpy as np
10
+ from dataclasses import dataclass
11
+
12
+ try:
13
+ from .configuration_sybil import SybilConfig
14
+ except ImportError:
15
+ from configuration_sybil import SybilConfig
16
+
17
+
18
@dataclass
class SybilOutput(BaseModelOutput):
    """
    Output container for the Sybil risk-prediction model.

    Args:
        risk_scores: (`torch.FloatTensor` of shape `(batch_size, max_followup)`):
            Predicted cancer risk for each follow-up year.
        image_attention: (`torch.FloatTensor` of shape `(batch_size, num_slices, height, width)`, *optional*):
            Attention weights over the pixels of each slice.
        volume_attention: (`torch.FloatTensor` of shape `(batch_size, num_slices)`, *optional*):
            Attention weights over the CT-scan slices.
        hidden_states: (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`, *optional*):
            Pooled hidden representation of the scan.
    """
    risk_scores: torch.FloatTensor = None
    image_attention: Optional[torch.FloatTensor] = None
    volume_attention: Optional[torch.FloatTensor] = None
    hidden_states: Optional[torch.FloatTensor] = None
37
+
38
+
39
class CumulativeProbabilityLayer(nn.Module):
    """
    Cumulative-hazard head for survival prediction.

    Mirrors the original Sybil implementation:
    - ``hazard_fc`` produces per-year hazards, clamped non-negative by ReLU.
    - ``base_hazard_fc`` produces a base hazard shared across all years.
    - An upper-triangular mask turns per-year hazards into cumulative sums,
      so outputs are non-decreasing across follow-up years.
    """

    def __init__(self, hidden_dim: int, max_followup: int = 6):
        super().__init__()
        self.max_followup = max_followup

        # Per-year hazard projections.
        self.hazard_fc = nn.Linear(hidden_dim, max_followup)
        # Base hazard shared by every year.
        self.base_hazard_fc = nn.Linear(hidden_dim, 1)
        self.relu = nn.ReLU(inplace=True)

        # upper_triangular_mask[i, j] == 1 iff i <= j, so summing over i
        # accumulates the hazards of all years up to j.
        mask = torch.t(torch.tril(torch.ones(max_followup, max_followup), diagonal=0))
        self.register_parameter(
            "upper_triangular_mask",
            torch.nn.Parameter(mask, requires_grad=False),
        )

    def hazards(self, x):
        """Project features to non-negative per-year hazards."""
        return self.relu(self.hazard_fc(x))

    def forward(self, x):
        """
        Compute cumulative hazard scores, matching the original Sybil.

        Args:
            x: Feature tensor of shape (B, hidden_dim).

        Returns:
            Cumulative scores of shape (B, max_followup).
        """
        per_year = self.hazards(x)
        batch, horizon = per_year.size()

        # (B, T) -> (B, T, T), then mask out contributions from later years.
        tiled = per_year.unsqueeze(-1).expand(batch, horizon, horizon)
        accumulated = torch.sum(tiled * self.upper_triangular_mask, dim=1)

        # Add the year-independent base hazard.
        return accumulated + self.base_hazard_fc(x)
99
+
100
+
101
class GlobalMaxPool(nn.Module):
    """Collapse all non-channel dimensions by taking the per-channel max."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, C, T, W, H).

        Returns:
            Dict whose 'hidden' entry has shape (B, C).
        """
        # Flatten everything after the channel axis, then max over it.
        flat = x.view(x.size(0), x.size(1), -1)
        return {'hidden': flat.max(dim=-1).values}
118
+
119
+
120
class PerFrameMaxPool(nn.Module):
    """Collapse the spatial dimensions of each slice by max-pooling."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, C, T, W, H).

        Returns:
            Dict whose 'multi_image_hidden' entry has shape (B, C, T).
        """
        assert len(x.shape) == 5
        b, c, t = x.size()[:3]
        # Flatten each slice's spatial extent, then max over it.
        per_frame = x.view(b, c, t, -1).max(dim=-1).values
        return {'multi_image_hidden': per_frame}
140
+
141
+
142
class Simple_AttentionPool(nn.Module):
    """Learned attention pooling over the positions of a (B, C, N) tensor."""

    def __init__(self, **kwargs):
        super(Simple_AttentionPool, self).__init__()
        # One attention logit per position, computed from channel features.
        self.attention_fc = nn.Linear(kwargs['num_chan'], 1)
        self.softmax = nn.Softmax(dim=-1)
        self.logsoftmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, C, N).

        Returns:
            Dict with:
                'volume_attention': log attention weights, shape (B, N).
                'hidden': attention-weighted channel sums, shape (B, C).
        """
        batch = x.shape[0]
        x = x.view(x.size(0), x.size(1), -1)             # (B, C, N)

        logits = self.attention_fc(x.transpose(1, 2))    # (B, N, 1)
        logits = logits.transpose(1, 2)                  # (B, 1, N)

        weights = self.softmax(logits)                   # sums to 1 over N
        pooled = torch.sum(x * weights, dim=-1)          # (B, C)

        return {
            'volume_attention': self.logsoftmax(logits).view(batch, -1),
            'hidden': pooled,
        }
173
+
174
+
175
class Simple_AttentionPool_MultiImg(nn.Module):
    """Learned attention pooling over the pixels within each slice."""

    def __init__(self, **kwargs):
        super(Simple_AttentionPool_MultiImg, self).__init__()
        self.attention_fc = nn.Linear(kwargs['num_chan'], 1)
        self.softmax = nn.Softmax(dim=-1)
        self.logsoftmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, C, T, W, H).

        Returns:
            Dict with:
                'image_attention': log pixel attention, shape (B, T, W*H).
                'multi_image_hidden': per-slice features, shape (B, C, T).
                'hidden': flattened per-slice features, shape (B, T*C).
        """
        B, C, T, W, H = x.size()
        # Fold batch and slice dims together: (B*T, C, W*H).
        x = x.permute(0, 2, 1, 3, 4).contiguous().view(B * T, C, W * H)

        logits = self.attention_fc(x.transpose(1, 2)).transpose(1, 2)  # (BT, 1, WH)
        weights = self.softmax(logits)

        pooled = torch.sum(x * weights, dim=-1)  # (BT, C)

        return {
            'image_attention': self.logsoftmax(logits).view(B, T, -1),
            'multi_image_hidden': pooled.view(B, T, C).permute(0, 2, 1).contiguous(),
            'hidden': pooled.view(B, T * C),
        }
208
+
209
+
210
class Conv1d_AttnPool(nn.Module):
    """Smooth slice features with a 1D convolution, then attention-pool."""

    def __init__(self, **kwargs):
        super(Conv1d_AttnPool, self).__init__()
        kernel = kwargs['conv_pool_kernel_size']
        # Same-padding, bias-free 1D convolution over the slice axis.
        self.conv1d = nn.Conv1d(
            kwargs['num_chan'],
            kwargs['num_chan'],
            kernel_size=kernel,
            stride=kwargs['stride'],
            padding=kernel // 2,
            bias=False,
        )
        self.aggregate = Simple_AttentionPool(**kwargs)

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, C, T).

        Returns:
            Dict from Simple_AttentionPool: 'hidden' (B, C) and
            'volume_attention' (B, T').
        """
        # Convolve along slices, then attention-pool the result.
        return self.aggregate(self.conv1d(x))
237
+
238
+
239
class MultiAttentionPool(nn.Module):
    """
    Multi-pathway pooling over CT-scan features, matching original Sybil.

    Combines two pathways (pixel attention + slice attention, and per-slice
    max pooling + conv attention) with a global max pool, then projects the
    concatenated features back to ``channels`` dimensions.
    """

    def __init__(self, channels: int = 512):
        super().__init__()
        # Honor the `channels` argument instead of hard-coding 512 (the
        # parameter was previously accepted but ignored). The default of
        # 512 reproduces the original architecture exactly.
        params = {
            'num_chan': channels,
            'conv_pool_kernel_size': 11,
            'stride': 1,
        }

        # Pathway 1: attention over pixels, then over slices.
        self.image_pool1 = Simple_AttentionPool_MultiImg(**params)
        self.volume_pool1 = Simple_AttentionPool(**params)
        # Pathway 2: max over pixels, then conv + attention over slices.
        self.image_pool2 = PerFrameMaxPool()
        self.volume_pool2 = Conv1d_AttnPool(**params)
        self.global_max_pool = GlobalMaxPool()

        # Projections that fuse the pathway outputs.
        self.multi_img_hidden_fc = nn.Linear(2 * channels, channels)
        self.hidden_fc = nn.Linear(3 * channels, channels)

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, C, T, W, H) where
                - B: batch size
                - C: channels (``channels`` from __init__)
                - T: temporal/depth dimension (slices)
                - W, H: spatial dimensions

        Returns:
            Dict with keys:
                - 'hidden': (B, C) final aggregated features
                - 'image_attention_1': (B, T, W*H) pixel attention (pathway 1)
                - 'volume_attention_1': (B, T) slice attention (pathway 1)
                - 'volume_attention_2': (B, T) slice attention (pathway 2)
                - 'multi_image_hidden': (B, C, T) fused per-slice features
                - 'maxpool_hidden': (B, C) global max-pooled features
                (plus the remaining per-pathway intermediates, suffixed
                with their pathway number)
        """
        output = {}

        # Pathway 1: pixel attention, then slice attention.
        image_out1 = self.image_pool1(x)
        volume_out1 = self.volume_pool1(image_out1['multi_image_hidden'])

        # Pathway 2: per-slice max pooling, then conv + slice attention.
        image_out2 = self.image_pool2(x)
        volume_out2 = self.volume_pool2(image_out2['multi_image_hidden'])

        # Expose every intermediate, suffixed with its pathway number.
        for pool_out, num in [(image_out1, 1), (volume_out1, 1),
                              (image_out2, 2), (volume_out2, 2)]:
            for key, val in pool_out.items():
                output['{}_{}'.format(key, num)] = val

        output['maxpool_hidden'] = self.global_max_pool(x)['hidden']

        # Fuse per-slice features: concatenate along the channel axis
        # -> (B, 2C, T), then project back to C per slice.
        fused_slices = torch.cat(
            [image_out1['multi_image_hidden'], image_out2['multi_image_hidden']],
            dim=-2,
        )
        output['multi_image_hidden'] = self.multi_img_hidden_fc(
            fused_slices.permute(0, 2, 1).contiguous()
        ).permute(0, 2, 1).contiguous()  # (B, C, T)

        # Fuse volume-level features from all three sources: (B, 3C) -> (B, C).
        fused_volume = torch.cat(
            [volume_out1['hidden'], volume_out2['hidden'], output['maxpool_hidden']],
            dim=-1,
        )
        output['hidden'] = self.hidden_fc(fused_volume)

        return output
323
+
324
+
325
class SybilPreTrainedModel(PreTrainedModel):
    """
    Base class wiring Sybil into the Hugging Face pretrained-model API:
    weight initialization plus checkpoint download/loading.
    """
    config_class = SybilConfig
    base_model_prefix = "sybil"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize weights for freshly created (non-checkpoint) modules."""
        if isinstance(module, nn.Linear):
            # Normal init scaled by the configured range; biases to zero.
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Conv3d):
            # He initialization for the ReLU-activated 3D convolutions.
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.zeros_(module.bias)
344
+
345
+
346
+ class SybilForRiskPrediction(SybilPreTrainedModel):
347
+ """
348
+ Sybil model for lung cancer risk prediction from CT scans.
349
+
350
+ This model takes 3D CT scan volumes as input and predicts cancer risk scores
351
+ for multiple future time points (typically 1-6 years).
352
+ """
353
+
354
def __init__(self, config: SybilConfig):
    """
    Build the Sybil network from a configuration.

    Components: a pretrained R3D-18 video backbone (classification head
    removed), multi-attention pooling, dropout, and a cumulative-hazard
    risk head, plus an optional score calibrator.
    """
    super().__init__(config)
    self.config = config

    # R3D-18 backbone with the avgpool/fc head stripped off.
    backbone = torchvision.models.video.r3d_18(pretrained=True)
    self.image_encoder = nn.Sequential(*list(backbone.children())[:-2])

    # Aggregates (B, 512, T, W, H) backbone features into (B, 512).
    self.pool = MultiAttentionPool(channels=512)

    self.relu = nn.ReLU(inplace=False)
    self.dropout = nn.Dropout(p=config.dropout)

    # Cumulative risk over the follow-up years.
    self.prob_of_failure_layer = CumulativeProbabilityLayer(
        config.hidden_dim,
        max_followup=config.max_followup,
    )

    # Optional post-hoc calibration of risk scores.
    self.calibrator = None
    if config.calibrator_data:
        self.set_calibrator(config.calibrator_data)

    # Standard HF weight init + final setup.
    self.post_init()
382
+
383
def set_calibrator(self, calibrator_data: Dict):
    """Attach calibration data used to adjust raw risk scores."""
    self.calibrator = calibrator_data
386
+
387
def _calibrate_scores(self, scores: torch.Tensor) -> torch.Tensor:
    """
    Apply per-year calibration to raw risk scores.

    Returns the input unchanged when no calibrator is attached. Years
    without a matching "Year{N}" entry pass through uncalibrated.

    Args:
        scores: Raw risk scores of shape (B, max_followup).

    Returns:
        Calibrated scores on the same device as the input.
    """
    if self.calibrator is None:
        return scores

    # Calibration runs in numpy on a detached CPU copy.
    raw = scores.detach().cpu().numpy()
    adjusted = np.zeros_like(raw)

    for year_idx in range(raw.shape[1]):
        key = f"Year{year_idx + 1}"
        column = raw[:, year_idx]
        if key in self.calibrator:
            adjusted[:, year_idx] = self._apply_calibration(column, self.calibrator[key])
        else:
            adjusted[:, year_idx] = column

    return torch.from_numpy(adjusted).to(scores.device)
409
+
410
+ def _apply_calibration(self, scores: np.ndarray, calibrator_params: Dict) -> np.ndarray:
411
+ """Apply specific calibration transformation"""
412
+ # Simplified calibration - in practice, this would use the full calibration model
413
+ # from the original Sybil implementation
414
+ return scores # Placeholder for now
415
+
416
+ def forward(
417
+ self,
418
+ pixel_values: torch.FloatTensor,
419
+ return_attentions: bool = False,
420
+ return_dict: bool = True,
421
+ ) -> SybilOutput:
422
+ """
423
+ Forward pass of the Sybil model.
424
+
425
+ Args:
426
+ pixel_values: (`torch.FloatTensor` of shape `(batch_size, channels, depth, height, width)`):
427
+ Pixel values of CT scan volumes.
428
+ return_attentions: (`bool`, *optional*, defaults to `False`):
429
+ Whether to return attention weights.
430
+ return_dict: (`bool`, *optional*, defaults to `True`):
431
+ Whether to return a `SybilOutput` instead of a plain tuple.
432
+
433
+ Returns:
434
+ `SybilOutput` or tuple
435
+ """
436
+ # Extract features using 3D CNN backbone
437
+ features = self.image_encoder(pixel_values)
438
+
439
+ # Apply multi-attention pooling
440
+ pool_output = self.pool(features)
441
+
442
+ # Apply ReLU and dropout
443
+ hidden = self.relu(pool_output['hidden'])
444
+ hidden = self.dropout(hidden)
445
+
446
+ # Predict risk scores
447
+ risk_logits = self.prob_of_failure_layer(hidden)
448
+ risk_scores = torch.sigmoid(risk_logits)
449
+
450
+ # Apply calibration if available
451
+ risk_scores = self._calibrate_scores(risk_scores)
452
+
453
+ if not return_dict:
454
+ outputs = (risk_scores,)
455
+ if return_attentions:
456
+ outputs = outputs + (pool_output.get('image_attention_1'),
457
+ pool_output.get('volume_attention_1'))
458
+ return outputs
459
+
460
+ return SybilOutput(
461
+ risk_scores=risk_scores,
462
+ image_attention=pool_output.get('image_attention_1') if return_attentions else None,
463
+ volume_attention=pool_output.get('volume_attention_1') if return_attentions else None,
464
+ hidden_states=hidden if return_attentions else None
465
+ )
466
+
467
+ @classmethod
468
+ def from_pretrained_ensemble(
469
+ cls,
470
+ pretrained_model_name_or_path,
471
+ checkpoint_paths: List[str],
472
+ calibrator_path: Optional[str] = None,
473
+ **kwargs
474
+ ):
475
+ """
476
+ Load an ensemble of Sybil models from checkpoints.
477
+
478
+ Args:
479
+ pretrained_model_name_or_path: Path to the pretrained model or model identifier.
480
+ checkpoint_paths: List of paths to individual model checkpoints.
481
+ calibrator_path: Path to calibration data.
482
+ **kwargs: Additional keyword arguments for model initialization.
483
+
484
+ Returns:
485
+ SybilEnsemble: An ensemble of Sybil models.
486
+ """
487
+ config = kwargs.pop("config", None)
488
+ if config is None:
489
+ config = SybilConfig.from_pretrained(pretrained_model_name_or_path)
490
+
491
+ # Load calibrator if provided
492
+ calibrator_data = None
493
+ if calibrator_path:
494
+ import json
495
+ with open(calibrator_path, 'r') as f:
496
+ calibrator_data = json.load(f)
497
+ config.calibrator_data = calibrator_data
498
+
499
+ # Create ensemble
500
+ models = []
501
+ for checkpoint_path in checkpoint_paths:
502
+ model = cls(config)
503
+ # Load checkpoint weights
504
+ checkpoint = torch.load(checkpoint_path, map_location='cpu')
505
+ # Remove 'model.' prefix from state dict keys if present
506
+ state_dict = {}
507
+ for k, v in checkpoint['state_dict'].items():
508
+ if k.startswith('model.'):
509
+ state_dict[k[6:]] = v
510
+ else:
511
+ state_dict[k] = v
512
+
513
+ # Map to new model structure
514
+ mapped_state_dict = model._map_checkpoint_weights(state_dict)
515
+ model.load_state_dict(mapped_state_dict, strict=False)
516
+ models.append(model)
517
+
518
+ return SybilEnsemble(models, config)
519
+
520
+ def _map_checkpoint_weights(self, state_dict: Dict) -> Dict:
521
+ """Map original Sybil checkpoint weights to new structure"""
522
+ mapped = {}
523
+
524
+ # Map encoder weights
525
+ for k, v in state_dict.items():
526
+ if k.startswith('image_encoder'):
527
+ mapped[k] = v
528
+ elif k.startswith('pool'):
529
+ # Map pooling layer weights
530
+ mapped[k] = v
531
+ elif k.startswith('prob_of_failure_layer'):
532
+ # Map final prediction layer
533
+ mapped[k] = v
534
+
535
+ return mapped
536
+
537
+
538
class SybilEnsemble:
    """Ensemble of Sybil models whose predictions are averaged."""

    def __init__(self, models: List[SybilForRiskPrediction], config: SybilConfig):
        self.models = models
        self.config = config
        self.device = None

    def to(self, device):
        """Move every member model to *device* and remember the device."""
        self.device = device
        for member in self.models:
            member.to(device)
        return self

    def eval(self):
        """Put every member model into evaluation mode."""
        for member in self.models:
            member.eval()

    def __call__(
        self,
        pixel_values: torch.FloatTensor,
        return_attentions: bool = False,
    ) -> SybilOutput:
        """
        Run inference and average the member models' outputs.

        Args:
            pixel_values: Input CT scan volumes.
            return_attentions: Whether to return attention maps.

        Returns:
            SybilOutput with predictions averaged over all member models.
        """
        score_list = []
        img_att_list = []
        vol_att_list = []

        with torch.no_grad():
            for member in self.models:
                result = member(
                    pixel_values=pixel_values,
                    return_attentions=return_attentions,
                )
                score_list.append(result.risk_scores)
                if return_attentions:
                    img_att_list.append(result.image_attention)
                    vol_att_list.append(result.volume_attention)

        # Mean over the ensemble dimension.
        mean_scores = torch.stack(score_list).mean(dim=0)

        mean_img_att = None
        mean_vol_att = None
        if return_attentions:
            mean_img_att = torch.stack(img_att_list).mean(dim=0)
            mean_vol_att = torch.stack(vol_att_list).mean(dim=0)

        return SybilOutput(
            risk_scores=mean_scores,
            image_attention=mean_img_att,
            volume_attention=mean_vol_att,
        )
modeling_sybil_hf.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Self-contained Hugging Face wrapper for Sybil lung cancer risk prediction model.
3
+ This version works directly from HF without requiring external Sybil package.
4
+ """
5
+
6
+ import os
7
+ import json
8
+ import sys
9
+ import torch
10
+ import numpy as np
11
+ from typing import List, Dict, Optional
12
+ from dataclasses import dataclass
13
+ from transformers.modeling_outputs import BaseModelOutput
14
+ from safetensors.torch import load_file
15
+
16
+ # Add model path to sys.path for imports
17
+ current_dir = os.path.dirname(os.path.abspath(__file__))
18
+ if current_dir not in sys.path:
19
+ sys.path.insert(0, current_dir)
20
+
21
+ try:
22
+ from .configuration_sybil import SybilConfig
23
+ from .modeling_sybil import SybilForRiskPrediction
24
+ from .image_processing_sybil import SybilImageProcessor
25
+ except ImportError:
26
+ from configuration_sybil import SybilConfig
27
+ from modeling_sybil import SybilForRiskPrediction
28
+ from image_processing_sybil import SybilImageProcessor
29
+
30
+
31
@dataclass
class SybilOutput(BaseModelOutput):
    """
    Container for Sybil model predictions.

    Args:
        risk_scores: Risk scores for each year (1-6 years by default)
        attentions: Optional attention maps if requested
    """

    risk_scores: torch.FloatTensor = None
    attentions: Optional[Dict] = None
42
+
43
+
44
class SybilHFWrapper:
    """
    Hugging Face wrapper for the Sybil ensemble model.

    Provides a simple interface for lung cancer risk prediction from CT scans.
    """

    def __init__(self, config: SybilConfig = None, model_dir: str = None):
        """
        Initialize the Sybil model ensemble.

        Args:
            config: Model configuration (a default SybilConfig is used if omitted)
            model_dir: Directory containing model files (defaults to this file's location)
        """
        self.config = config if config is not None else SybilConfig()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Resolve the directory that holds checkpoints and calibrator data.
        if model_dir is not None:
            self.model_dir = model_dir
        else:
            # Default to where this file is located.
            self.model_dir = os.path.dirname(os.path.abspath(__file__))

        # DICOM -> tensor preprocessing.
        self.image_processor = SybilImageProcessor()

        # Isotonic-regression calibrator parameters (may be empty).
        self.calibrator = self._load_calibrator()

        # Ensemble member models, moved to self.device in eval mode.
        self.models = self._load_ensemble_models()

    def _load_calibrator(self) -> Dict:
        """Load ensemble calibrator data, returning {} when none is found."""
        calibrator_path = os.path.join(
            self.model_dir, "checkpoints", "sybil_ensemble_simple_calibrator.json"
        )
        if os.path.exists(calibrator_path):
            with open(calibrator_path, 'r') as f:
                return json.load(f)

        # Try alternative location at the model root.
        calibrator_path = os.path.join(self.model_dir, "calibrator_data.json")
        if os.path.exists(calibrator_path):
            with open(calibrator_path, 'r') as f:
                return json.load(f)
        return {}

    def _load_ensemble_models(self) -> List[torch.nn.Module]:
        """
        Load all models in the ensemble from original checkpoints.

        Note: We load from .ckpt files instead of safetensors because the
        safetensors were created with the wrong CumulativeProbabilityLayer
        architecture.

        Raises:
            ValueError: If no checkpoint could be loaded at all.
        """
        import glob as glob_module
        models = []

        # Find all .ckpt files in the checkpoints directory.
        checkpoints_dir = os.path.join(self.model_dir, "checkpoints")
        checkpoint_files = sorted(glob_module.glob(os.path.join(checkpoints_dir, "*.ckpt")))

        print(f"Found {len(checkpoint_files)} checkpoint files")

        for checkpoint_path in checkpoint_files:
            try:
                model = SybilForRiskPrediction(self.config)
                # Lightning checkpoints contain pickled metadata, so
                # weights_only=False is required; only load trusted files.
                checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)

                # Checkpoints may be raw state dicts or wrap one.
                if 'state_dict' in checkpoint:
                    state_dict = checkpoint['state_dict']
                else:
                    state_dict = checkpoint

                # Strip the Lightning 'model.' prefix when present.
                cleaned_state_dict = {}
                for k, v in state_dict.items():
                    if k.startswith('model.'):
                        cleaned_state_dict[k[6:]] = v
                    else:
                        cleaned_state_dict[k] = v

                model.load_state_dict(cleaned_state_dict, strict=False)
                model.to(self.device)
                model.eval()
                models.append(model)
                print(f" Loaded model from {os.path.basename(checkpoint_path)}")
            except Exception as e:
                # Best effort: skip unreadable checkpoints but report them.
                print(f" Warning: Could not load {os.path.basename(checkpoint_path)}: {e}")
                continue

        if not models:
            raise ValueError("No models could be loaded from the ensemble. Please ensure model files are present.")

        print(f"Loaded {len(models)} models in ensemble")
        return models

    def _apply_calibration(self, scores: np.ndarray) -> np.ndarray:
        """
        Apply complete isotonic regression calibration matching the original Sybil implementation.

        Mirrors the original SimpleClassifierGroup.predict_proba:
        1. For each year, apply each calibrator in the ensemble
        2. Each calibrator applies: linear transform -> clip -> isotonic regression (np.interp)
        3. Average predictions from all calibrators

        Args:
            scores: Raw risk scores from the model (shape: [batch_size, num_years])

        Returns:
            Calibrated risk scores (shape: [batch_size, num_years])
        """
        if not self.calibrator:
            return scores

        calibrated_scores = []

        for year in range(scores.shape[1]):
            year_key = f"Year{year + 1}"

            if year_key not in self.calibrator:
                # No calibrator for this year: use raw scores.
                calibrated_scores.append(scores[:, year])
                continue

            cal_list = self.calibrator[year_key]

            if not isinstance(cal_list, list) or len(cal_list) == 0:
                # Invalid calibrator format: use raw scores.
                calibrated_scores.append(scores[:, year])
                continue

            # Apply each calibrator and collect its predictions.
            year_predictions = []

            for cal_data in cal_list:
                if not isinstance(cal_data, dict):
                    continue

                # Linear-transform parameters are mandatory.
                if "coef" not in cal_data or "intercept" not in cal_data:
                    continue

                coef = np.array(cal_data["coef"])  # shape: [[scalar]]
                intercept = np.array(cal_data["intercept"])  # shape: [scalar]

                # Isotonic-regression support points are mandatory too.
                if "x0" not in cal_data or "y0" not in cal_data:
                    continue

                x0 = np.array(cal_data["x0"])
                y0 = np.array(cal_data["y0"])

                # Clipping bounds default to unbounded.
                x_min = cal_data.get("x_min", -np.inf)
                x_max = cal_data.get("x_max", np.inf)

                # Step 1: linear transformation.
                probs = scores[:, year].reshape(-1, 1)  # [batch_size, 1]
                T = probs @ coef + intercept
                T = T.flatten()  # [batch_size]

                # Step 2: clip to the calibrator's valid range.
                T = np.clip(T, x_min, x_max)

                # Step 3: isotonic regression via piecewise-linear interpolation.
                calibrated = np.interp(T, x0, y0)

                year_predictions.append(calibrated)

            if len(year_predictions) == 0:
                # No valid calibrators: use raw scores.
                calibrated_scores.append(scores[:, year])
            else:
                # Average over the calibrator ensemble (like SimpleClassifierGroup).
                calibrated_scores.append(np.mean(year_predictions, axis=0))

        return np.stack(calibrated_scores, axis=1)

    def preprocess_dicom(self, dicom_paths: List[str]) -> torch.Tensor:
        """
        Preprocess DICOM files for model input.

        Args:
            dicom_paths: List of paths to DICOM files

        Returns:
            Preprocessed tensor of shape (B, C, D, H, W) on self.device
        """
        # The image processor handles DICOM reading and normalization.
        result = self.image_processor(dicom_paths, file_type="dicom", return_tensors="pt")
        pixel_values = result["pixel_values"]

        # Ensure a 5D tensor (B, C, D, H, W).
        if pixel_values.ndim == 4:
            pixel_values = pixel_values.unsqueeze(0)  # add batch dimension

        return pixel_values.to(self.device)

    def predict(self, dicom_paths: List[str], return_attentions: bool = False) -> SybilOutput:
        """
        Run prediction on a single CT scan series.

        Args:
            dicom_paths: List of paths to DICOM files for a single CT series
            return_attentions: Whether to return attention maps

        Returns:
            SybilOutput with calibrated risk scores and optional attention maps
        """
        pixel_values = self.preprocess_dicom(dicom_paths)

        all_predictions = []
        all_attentions = []

        with torch.no_grad():
            for model in self.models:
                output = model(
                    pixel_values=pixel_values,
                    return_attentions=return_attentions
                )

                # Extract risk scores from either output object or tuple.
                if hasattr(output, 'risk_scores'):
                    predictions = output.risk_scores
                else:
                    predictions = output[0] if isinstance(output, tuple) else output

                all_predictions.append(predictions.cpu().numpy())

                # Guard against None attentions: stacking None would crash.
                if return_attentions and getattr(output, 'image_attention', None) is not None:
                    all_attentions.append(output.image_attention)

        # Average ensemble predictions, then calibrate.
        ensemble_pred = np.mean(all_predictions, axis=0)
        calibrated_pred = self._apply_calibration(ensemble_pred)

        risk_scores = torch.from_numpy(calibrated_pred).float()

        attentions = None
        if return_attentions and all_attentions:
            attentions = {"image_attention": torch.stack(all_attentions).mean(dim=0)}

        return SybilOutput(risk_scores=risk_scores, attentions=attentions)

    def __call__(self, dicom_paths: List[str] = None, dicom_series: List[List[str]] = None, **kwargs) -> SybilOutput:
        """
        Convenience method for prediction.

        Args:
            dicom_paths: List of DICOM file paths for a single series
            dicom_series: List of lists of DICOM paths for batch processing
            **kwargs: Additional arguments passed to predict()

        Returns:
            SybilOutput with predictions

        Raises:
            ValueError: If neither dicom_paths nor dicom_series is given.
        """
        if dicom_series is not None:
            # Batch processing: stack per-series scores.
            all_outputs = []
            for paths in dicom_series:
                output = self.predict(paths, **kwargs)
                all_outputs.append(output.risk_scores)

            risk_scores = torch.stack(all_outputs)
            return SybilOutput(risk_scores=risk_scores)
        elif dicom_paths is not None:
            return self.predict(dicom_paths, **kwargs)
        else:
            raise ValueError("Either dicom_paths or dicom_series must be provided")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        """
        Load model from Hugging Face hub or local path.

        Args:
            pretrained_model_name_or_path: HF model ID or local path
            **kwargs: Additional configuration arguments

        Returns:
            SybilHFWrapper instance
        """
        config = kwargs.pop("config", None)
        if config is None:
            try:
                config = SybilConfig.from_pretrained(pretrained_model_name_or_path)
            except Exception:
                # Best effort: fall back to the default configuration. A bare
                # ``except:`` here would also swallow KeyboardInterrupt.
                config = SybilConfig()

        # When given a local directory, load checkpoints from it rather than
        # from this file's own location (previously the path was ignored).
        model_dir = None
        if isinstance(pretrained_model_name_or_path, str) and os.path.isdir(pretrained_model_name_or_path):
            model_dir = pretrained_model_name_or_path

        return cls(config=config, model_dir=model_dir)
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
torch>=2.0.0
torchvision>=0.15.0
transformers>=4.30.0
safetensors>=0.3.0
pydicom>=2.3.0
torchio>=0.19.0
SimpleITK>=2.2.0
numpy>=1.21.0
Pillow>=9.0.0
sybil>=1.2.0